repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
---|---|---|---|---|---|---|
null | ceph-main/src/librbd/operation/EnableFeaturesRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_ENABLE_FEATURES_REQUEST_H
#define CEPH_LIBRBD_OPERATION_ENABLE_FEATURES_REQUEST_H
#include "librbd/operation/Request.h"
class Context;
namespace librbd {
class ImageCtx;
namespace operation {
// Request to enable image features (journaling, object map, mirroring, ...)
// on an open image.  Runs as a journaled, I/O-affecting operation.
template <typename ImageCtxT = ImageCtx>
class EnableFeaturesRequest : public Request<ImageCtxT> {
public:
  // Factory helper; the request is heap-allocated and destroys itself on
  // completion (AsyncRequest lifetime convention).
  static EnableFeaturesRequest *create(ImageCtxT &image_ctx, Context *on_finish,
                                       uint64_t journal_op_tid,
                                       uint64_t features) {
    return new EnableFeaturesRequest(image_ctx, on_finish, journal_op_tid,
                                     features);
  }

  EnableFeaturesRequest(ImageCtxT &image_ctx, Context *on_finish,
                        uint64_t journal_op_tid, uint64_t features);

protected:
  void send_op() override;
  bool should_complete(int r) override;

  // Enabling features interacts with in-flight I/O (writes are blocked
  // during the state machine), so the op event is appended via the
  // I/O-affecting path in Request<I>.
  bool can_affect_io() const override {
    return true;
  }

  // Journal event for this op; final 'true' flags this as an enable (vs
  // disable) of m_features.
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::UpdateFeaturesEvent(op_tid, m_features, true);
  }

private:
  /**
   * EnableFeatures goes through the following state machine:
   *
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * STATE_PREPARE_LOCK
   *    |
   *    v
   * STATE_BLOCK_WRITES
   *    |
   *    v
   * STATE_GET_MIRROR_MODE
   *    |
   *    v
   * STATE_CREATE_JOURNAL (skip if not required)
   *    |
   *    v
   * STATE_APPEND_OP_EVENT (skip if journaling disabled)
   *    |
   *    v
   * STATE_UPDATE_FLAGS
   *    |
   *    v
   * STATE_SET_FEATURES
   *    |
   *    v
   * STATE_CREATE_OBJECT_MAP (skip if not required)
   *    |
   *    v
   * STATE_ENABLE_MIRROR_IMAGE
   *    |
   *    V
   * STATE_NOTIFY_UPDATE
   *    |
   *    | (unblock writes)
   *    v
   * <finish>
   * @endverbatim
   *
   */

  uint64_t m_features;                // feature bitmask requested by the caller

  bool m_enable_mirroring = false;    // mirroring must also be enabled afterwards
  bool m_requests_blocked = false;    // lock requests currently blocked (for cleanup)
  bool m_writes_blocked = false;      // writes currently blocked (for cleanup)

  uint64_t m_new_features = 0;        // resulting feature set to persist
  uint64_t m_enable_flags = 0;        // image flags to set alongside the features
  uint64_t m_features_mask = 0;       // mask of feature bits being modified

  bufferlist m_out_bl;                // scratch buffer for cls call responses

  void send_prepare_lock();
  Context *handle_prepare_lock(int *result);

  void send_block_writes();
  Context *handle_block_writes(int *result);

  void send_get_mirror_mode();
  Context *handle_get_mirror_mode(int *result);

  void send_create_journal();
  Context *handle_create_journal(int *result);

  void send_append_op_event();
  Context *handle_append_op_event(int *result);

  void send_update_flags();
  Context *handle_update_flags(int *result);

  void send_set_features();
  Context *handle_set_features(int *result);

  void send_create_object_map();
  Context *handle_create_object_map(int *result);

  void send_enable_mirror_image();
  Context *handle_enable_mirror_image(int *result);

  void send_notify_update();
  Context *handle_notify_update(int *result);

  Context *handle_finish(int r);
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::EnableFeaturesRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_ENABLE_FEATURES_REQUEST_H
| 3,231 | 22.764706 | 81 | h |
null | ceph-main/src/librbd/operation/MetadataRemoveRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_METADATA_REMOVE_REQUEST_H
#define CEPH_LIBRBD_OPERATION_METADATA_REMOVE_REQUEST_H
#include "librbd/operation/Request.h"
#include <iosfwd>
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace operation {
// Journaled request that removes a single metadata key from an image.
template <typename ImageCtxT = ImageCtx>
class MetadataRemoveRequest : public Request<ImageCtxT> {
public:
  MetadataRemoveRequest(ImageCtxT &image_ctx, Context *on_finish,
                        const std::string &key);

protected:
  void send_op() override;
  bool should_complete(int r) override;

  // Journal event recording the removal of m_key.
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::MetadataRemoveEvent(op_tid, m_key);
  }

private:
  std::string m_key;   // metadata key to remove

  void send_metadata_remove();
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::MetadataRemoveRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_METADATA_REMOVE_REQUEST_H
| 1,048 | 22.311111 | 81 | h |
null | ceph-main/src/librbd/operation/MetadataSetRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_METADATA_SET_REQUEST_H
#define CEPH_LIBRBD_OPERATION_METADATA_SET_REQUEST_H
#include "librbd/operation/Request.h"
#include "include/buffer.h"
#include <string>
#include <map>
class Context;
namespace librbd {
class ImageCtx;
namespace operation {
// Journaled request that sets a single metadata key/value pair on an image.
template <typename ImageCtxT = ImageCtx>
class MetadataSetRequest : public Request<ImageCtxT> {
public:
  MetadataSetRequest(ImageCtxT &image_ctx, Context *on_finish,
                     const std::string &key, const std::string &value);

protected:
  void send_op() override;
  bool should_complete(int r) override;

  // Journal event recording the key/value assignment.
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::MetadataSetEvent(op_tid, m_key, m_value);
  }

private:
  std::string m_key;     // metadata key being set
  std::string m_value;   // value to associate with m_key

  // key -> encoded value map passed to the metadata_set cls operation
  std::map<std::string, bufferlist> m_data;

  void send_metadata_set();
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::MetadataSetRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_METADATA_SET_REQUEST_H
| 1,145 | 22.875 | 78 | h |
null | ceph-main/src/librbd/operation/MigrateRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_MIGRATE_REQUEST_H
#define CEPH_LIBRBD_OPERATION_MIGRATE_REQUEST_H
#include "librbd/operation/Request.h"
#include "librbd/Types.h"
namespace librbd {
class ImageCtx;
class ProgressContext;
namespace operation {
// Request that copies objects from a migration-source (parent) image into
// this image.  Not journaled: create_event() aborts if invoked.
template <typename ImageCtxT = ImageCtx>
class MigrateRequest : public Request<ImageCtxT>
{
public:
  MigrateRequest(ImageCtxT &image_ctx, Context *on_finish,
                 ProgressContext &prog_ctx)
    : Request<ImageCtxT>(image_ctx, on_finish), m_prog_ctx(prog_ctx) {
  }

protected:
  void send_op() override;
  bool should_complete(int r) override;
  bool can_affect_io() const override {
    return true;
  }

  // Migration is never journaled; reaching this is a logic error.
  journal::Event create_event(uint64_t op_tid) const override {
    ceph_abort();
    return journal::UnknownEvent();
  }

private:
  /**
   * Migrate goes through the following state machine to copy objects
   * from the parent (migrating source) image:
   *
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * MIGRATE_OBJECTS
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   *
   */

  ProgressContext &m_prog_ctx;   // progress feedback for the long-running copy

  void migrate_objects();
  void handle_migrate_objects(int r);

  uint64_t get_num_overlap_objects();
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::MigrateRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_MIGRATE_REQUEST_H
| 1,477 | 20.42029 | 74 | h |
null | ceph-main/src/librbd/operation/ObjectMapIterate.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_OBJECT_MAP_ITERATE_H
#define CEPH_LIBRBD_OPERATION_OBJECT_MAP_ITERATE_H
#include <iostream>
#include <atomic>
#include "include/int_types.h"
#include "include/rbd/object_map_types.h"
#include "librbd/AsyncRequest.h"
namespace librbd {
class ImageCtx;
class ProgressContext;
namespace operation {
// Per-object callback invoked while iterating the object map; receives the
// object number plus its current and proposed new object-map states.
// NOTE(review): the meaning of the bool return is defined by the concrete
// callback/caller — verify against ObjectMapIterate.cc before relying on it.
template <typename ImageCtxT = ImageCtx>
using ObjectIterateWork = bool(*)(ImageCtxT &image_ctx,
                                  uint64_t object_no,
                                  uint8_t current_state,
                                  uint8_t new_state);
// Async request that walks every object tracked by the object map, invoking
// a user-supplied ObjectIterateWork callback on state mismatches, and
// invalidates the object map afterwards when required.
template <typename ImageCtxT = ImageCtx>
class ObjectMapIterateRequest : public AsyncRequest<ImageCtxT> {
public:
  ObjectMapIterateRequest(ImageCtxT &image_ctx, Context *on_finish,
                          ProgressContext &prog_ctx,
                          ObjectIterateWork<ImageCtxT> handle_mismatch)
    : AsyncRequest<ImageCtxT>(image_ctx, on_finish), m_image_ctx(image_ctx),
      m_prog_ctx(prog_ctx), m_handle_mismatch(handle_mismatch)
  {
  }

  void send() override;

protected:
  bool should_complete(int r) override;

private:
  enum State {
    STATE_VERIFY_OBJECTS,         // scan objects, invoking m_handle_mismatch
    STATE_INVALIDATE_OBJECT_MAP   // flag the object map invalid if needed
  };

  ImageCtxT &m_image_ctx;
  ProgressContext &m_prog_ctx;                     // progress feedback
  ObjectIterateWork<ImageCtxT> m_handle_mismatch;  // per-object mismatch callback
  // set at most once (atomic_flag) to request object-map invalidation
  std::atomic_flag m_invalidate = ATOMIC_FLAG_INIT;
  State m_state = STATE_VERIFY_OBJECTS;

  void send_verify_objects();
  void send_invalidate_object_map();

  uint64_t get_image_size() const;
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::ObjectMapIterateRequest<librbd::ImageCtx>;
#endif
| 1,633 | 23.757576 | 83 | h |
null | ceph-main/src/librbd/operation/RebuildObjectMapRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_REBUILD_OBJECT_MAP_REQUEST_H
#define CEPH_LIBRBD_OPERATION_REBUILD_OBJECT_MAP_REQUEST_H
#include "include/int_types.h"
#include "librbd/AsyncRequest.h"
namespace librbd {
class ImageCtx;
class ProgressContext;
namespace operation {
// Async request that rebuilds an image's object map from the actual
// per-object state (resize map -> verify objects -> save -> update header).
template <typename ImageCtxT = ImageCtx>
class RebuildObjectMapRequest : public AsyncRequest<ImageCtxT> {
public:

  RebuildObjectMapRequest(ImageCtxT &image_ctx, Context *on_finish,
                          ProgressContext &prog_ctx)
    : AsyncRequest<ImageCtxT>(image_ctx, on_finish), m_image_ctx(image_ctx),
      m_prog_ctx(prog_ctx), m_attempted_trim(false)
  {
  }

  void send() override;

protected:
  bool should_complete(int r) override;

private:
  /**
   * Rebuild object map goes through the following state machine to
   * verify per-object state:
   *
   * <start>
   *  .  |  . . . . . . . . . .
   *  .  |  .                  .
   *  .  v  v                  .
   *  . STATE_RESIZE_OBJECT_MAP . . . > STATE_TRIM_IMAGE
   *  .  |
   *  .  v
   *  . . . > STATE_VERIFY_OBJECTS
   *             |
   *             v
   *          STATE_SAVE_OBJECT_MAP
   *             |
   *             v
   *          STATE_UPDATE_HEADER
   *
   * The _RESIZE_OBJECT_MAP state will be skipped if the object map
   * is appropriately sized for the image. The _TRIM_IMAGE state will
   * only be hit if the resize failed due to an in-use object.
   */
  enum State {
    STATE_RESIZE_OBJECT_MAP,
    STATE_TRIM_IMAGE,
    STATE_VERIFY_OBJECTS,
    STATE_SAVE_OBJECT_MAP,
    STATE_UPDATE_HEADER
  };

  ImageCtxT &m_image_ctx;
  ProgressContext &m_prog_ctx;        // progress feedback for the rebuild
  State m_state = STATE_RESIZE_OBJECT_MAP;
  bool m_attempted_trim;              // guards the one-shot _TRIM_IMAGE retry

  void send_resize_object_map();
  void send_trim_image();
  void send_verify_objects();
  void send_save_object_map();
  void send_update_header();

  uint64_t get_image_size() const;
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::RebuildObjectMapRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_REBUILD_OBJECT_MAP_REQUEST_H
| 2,232 | 25.270588 | 83 | h |
null | ceph-main/src/librbd/operation/Request.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_REQUEST_H
#define CEPH_LIBRBD_OPERATION_REQUEST_H
#include "librbd/AsyncRequest.h"
#include "include/Context.h"
#include "librbd/Utils.h"
#include "librbd/Journal.h"
namespace librbd {
class ImageCtx;
namespace operation {
// Base class for journaled maintenance operations on an image.  Extends
// AsyncRequest with op-event journaling: subclasses describe themselves via
// create_event() and this class appends/commits that event to the image
// journal around the operation.
template <typename ImageCtxT = ImageCtx>
class Request : public AsyncRequest<ImageCtxT> {
public:
  Request(ImageCtxT &image_ctx, Context *on_finish,
          uint64_t journal_op_tid = 0);

  void send();

protected:
  void finish(int r) override;
  virtual void send_op() = 0;

  // Whether this operation interacts with in-flight I/O.  Must return true
  // for subclasses that use the templated append_op_event() helper below
  // (it asserts this).
  virtual bool can_affect_io() const {
    return false;
  }

  // Journal event describing this operation.
  virtual journal::Event create_event(uint64_t op_tid) const = 0;

  // Append (or replay) the op event for an I/O-affecting request.  Returns
  // true if a journal callback was queued (request->*MF will run on
  // completion); false if the journal is absent/inactive and the caller
  // should proceed directly.
  template <typename T, Context*(T::*MF)(int*)>
  bool append_op_event(T *request) {
    ImageCtxT &image_ctx = this->m_image_ctx;

    ceph_assert(can_affect_io());
    // acquire both locks atomically (std::scoped_lock is deadlock-safe)
    std::scoped_lock locker{image_ctx.owner_lock, image_ctx.image_lock};
    if (image_ctx.journal != nullptr) {
      if (image_ctx.journal->is_journal_replaying()) {
        Context *ctx = util::create_context_callback<T, MF>(request);
        replay_op_ready(ctx);
        return true;
      } else if (image_ctx.journal->is_journal_appending()) {
        Context *ctx = util::create_context_callback<T, MF>(request);
        append_op_event(ctx);
        return true;
      }
    }
    return false;
  }

  bool append_op_event();

  // NOTE: temporary until converted to new state machine format
  Context *create_context_finisher(int r);
  void finish_and_destroy(int r) override;

private:
  // Records that the op event was safely appended (r >= 0) before chaining
  // to the wrapped completion.
  struct C_AppendOpEvent : public Context {
    Request *request;
    Context *on_safe;
    C_AppendOpEvent(Request *request, Context *on_safe)
      : request(request), on_safe(on_safe) {
    }
    void finish(int r) override {
      if (r >= 0) {
        request->m_appended_op_event = true;
      }
      on_safe->complete(r);
    }
  };

  // Finalizes the op-event commit and destroys the owning request.
  struct C_CommitOpEvent : public Context {
    Request *request;
    int ret_val;
    C_CommitOpEvent(Request *request, int ret_val)
      : request(request), ret_val(ret_val) {
    }
    void finish(int r) override {
      request->handle_commit_op_event(r, ret_val);
      delete request;
    }
  };

  uint64_t m_op_tid = 0;             // journal op tid assigned to this request
  bool m_appended_op_event = false;  // op event safely recorded in the journal
  bool m_committed_op_event = false; // op event commit already initiated

  void replay_op_ready(Context *on_safe);
  void append_op_event(Context *on_safe);
  void handle_op_event_safe(int r);

  bool commit_op_event(int r);
  void handle_commit_op_event(int r, int original_ret_val);
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::Request<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_REQUEST_H
| 2,749 | 24.700935 | 72 | h |
null | ceph-main/src/librbd/operation/ResizeRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_RESIZE_REQUEST_H
#define CEPH_LIBRBD_OPERATION_RESIZE_REQUEST_H
#include "librbd/operation/Request.h"
#include "include/xlist.h"
namespace librbd
{
class ImageCtx;
class ProgressContext;
namespace operation {
// Journaled request that grows or shrinks an image, trimming objects and
// resizing the object map as needed.
template <typename ImageCtxT = ImageCtx>
class ResizeRequest : public Request<ImageCtxT> {
public:
  // Factory helper; the request destroys itself on completion.
  static ResizeRequest *create(ImageCtxT &image_ctx, Context *on_finish,
                               uint64_t new_size, bool allow_shrink,
                               ProgressContext &prog_ctx, uint64_t journal_op_tid,
                               bool disable_journal) {
    return new ResizeRequest(image_ctx, on_finish, new_size, allow_shrink, prog_ctx,
                             journal_op_tid, disable_journal);
  }

  ResizeRequest(ImageCtxT &image_ctx, Context *on_finish, uint64_t new_size,
                bool allow_shrink, ProgressContext &prog_ctx, uint64_t journal_op_tid,
                bool disable_journal);
  ~ResizeRequest() override;

  // True when this resize reduces the (visible) image size.
  inline bool shrinking() const {
    return (m_shrink_size_visible && m_new_size < m_original_size);
  }

  // Target size of the image after the resize completes.
  inline uint64_t get_image_size() const {
    return m_new_size;
  }

  void send() override;

protected:
  void send_op() override;
  bool should_complete(int r) override {
    return true;
  }
  bool can_affect_io() const override {
    return true;
  }
  // Journal event recording the new image size.
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::ResizeEvent(op_tid, m_new_size);
  }

private:
  /**
   * Resize goes through the following state machine to resize the image
   * and update the object map:
   *
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * STATE_PRE_BLOCK_WRITES
   *    |
   *    v
   * STATE_APPEND_OP_EVENT (skip if journaling
   *    |                   disabled)
   *    |
   *    | (grow)
   *    |\--------> STATE_GROW_OBJECT_MAP (skip if object map
   *    |                 |                disabled)
   *    |                 v
   *    |           STATE_UPDATE_HEADER ----------------------------\
   *    |             (unblock writes)                              |
   *    |                                                           |
   *    | (unblock writes)                                          |
   *    |                                                           |
   *    | (shrink)                                                  |
   *    |\--------> STATE_FLUSH_CACHE                               |
   *    |                 |                                         |
   *    |                 v                                         |
   *    |           STATE_INVALIDATE_CACHE                          |
   *    |                 |                                         |
   *    |                 v                                         |
   *    |           STATE_TRIM_IMAGE                                |
   *    |                 |                                         |
   *    |                 v                                         |
   *    |           STATE_POST_BLOCK_WRITES                         |
   *    |                 |                                         |
   *    |                 v                                         |
   *    |           STATE_UPDATE_HEADER                             |
   *    |                 |                                         |
   *    |                 v                                         |
   *    |           STATE_SHRINK_OBJECT_MAP (skip if object map     |
   *    |                 |                  disabled)              |
   *    |                 | (unblock writes)                        |
   *    | (no change)     v                                         |
   *    \------------> <finish> <-----------------------------------/
   *
   * @endverbatim
   *
   * The _OBJECT_MAP states are skipped if the object map isn't enabled.
   * The state machine will immediately transition to _FINISHED if there
   * are no objects to trim.
   */

  uint64_t m_original_size;           // image size before the resize
  uint64_t m_new_size;                // requested target size
  bool m_allow_shrink = true;         // reject shrink requests when false
  ProgressContext &m_prog_ctx;        // progress feedback (trim can be slow)
  uint64_t m_new_parent_overlap;      // recomputed parent overlap after resize
  bool m_shrink_size_visible = false; // new (smaller) size already visible to I/O
  bool m_disable_journal = false;     // skip op-event journaling when true

  // membership in ImageCtx's list of in-flight resize requests
  typename xlist<ResizeRequest<ImageCtxT>*>::item m_xlist_item;

  void send_pre_block_writes();
  Context *handle_pre_block_writes(int *result);

  Context *send_append_op_event();
  Context *handle_append_op_event(int *result);

  void send_flush_cache();
  Context *handle_flush_cache(int *result);

  void send_invalidate_cache();
  Context *handle_invalidate_cache(int *result);

  void send_trim_image();
  Context *handle_trim_image(int *result);

  Context *send_grow_object_map();
  Context *handle_grow_object_map(int *result);

  Context *send_shrink_object_map();
  Context *handle_shrink_object_map(int *result);

  void send_post_block_writes();
  Context *handle_post_block_writes(int *result);

  void send_update_header();
  Context *handle_update_header(int *result);

  void compute_parent_overlap();
  void update_size_and_overlap();
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::ResizeRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_RESIZE_REQUEST_H
| 5,353 | 33.101911 | 86 | h |
null | ceph-main/src/librbd/operation/SnapshotCreateRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_SNAPSHOT_CREATE_REQUEST_H
#define CEPH_LIBRBD_OPERATION_SNAPSHOT_CREATE_REQUEST_H
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/Types.h"
#include "librbd/operation/Request.h"
#include <string>
class Context;
namespace librbd {
class ImageCtx;
class ProgressContext;
namespace operation {
// Journaled request that creates an image snapshot, quiescing client I/O,
// allocating a snap id, and optionally updating object map / mirror state.
template <typename ImageCtxT = ImageCtx>
class SnapshotCreateRequest : public Request<ImageCtxT> {
public:
  /**
   * Snap Create goes through the following state machine:
   *
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * STATE_NOTIFY_QUIESCE * * * * * * * * * * * * *
   *    |                                         *
   *    v                                         *
   * STATE_SUSPEND_REQUESTS                       *
   *    |                                         *
   *    v                                         *
   * STATE_SUSPEND_AIO * * * * * * * * * * * * * *
   *    |                                         *
   *    v                                         *
   * STATE_APPEND_OP_EVENT (skip if journal       *
   *    |                   disabled)             *
   *    | (retry)  v                              *
   *    . . . > STATE_ALLOCATE_SNAP_ID            *
   *    .          |                              *
   *    .          v                              *
   *    . . . . STATE_CREATE_SNAP * * * * * * * * *
   *               |                     *        *
   *               v                     *        *
   *    STATE_CREATE_OBJECT_MAP (skip if *        *
   *               |             disabled)        *
   *               v                     *        *
   *    STATE_CREATE_IMAGE_STATE (skip if*        *
   *               |          not mirror *        *
   *               |            snapshot)*        *
   *               |                     v        *
   *               |        STATE_RELEASE_SNAP_ID *
   *               |                     |        *
   *               |                     v        *
   *               \----------> STATE_NOTIFY_UNQUIESCE < *
   *                                     |
   *                                     v
   *                                  <finish>
   * @endverbatim
   *
   * The _CREATE_SNAP state may repeat back to the _ALLOCATE_SNAP_ID state
   * if a stale snapshot context is allocated. If the create operation needs
   * to abort, the error path is followed to record the result in the journal
   * (if enabled) and bubble the originating error code back to the client.
   */
  SnapshotCreateRequest(ImageCtxT &image_ctx, Context *on_finish,
                        const cls::rbd::SnapshotNamespace &snap_namespace,
                        const std::string &snap_name, uint64_t journal_op_tid,
                        uint64_t flags, ProgressContext &prog_ctx);

protected:
  void send_op() override;
  bool should_complete(int r) override {
    return true;
  }
  bool can_affect_io() const override {
    return true;
  }
  // Journal event recording the snapshot namespace/name.
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::SnapCreateEvent(op_tid, m_snap_namespace, m_snap_name);
  }

private:
  cls::rbd::SnapshotNamespace m_snap_namespace;  // namespace of the new snap
  std::string m_snap_name;                       // name of the new snap
  // presumably derived from the creation 'flags' ctor arg — verify in the .cc
  bool m_skip_object_map;
  bool m_skip_notify_quiesce;
  bool m_ignore_notify_quiesce_error;
  ProgressContext &m_prog_ctx;                   // progress feedback

  uint64_t m_request_id = 0;
  int m_ret_val = 0;                             // first error encountered
  bool m_writes_blocked = false;                 // writes blocked (for cleanup)

  uint64_t m_snap_id = CEPH_NOSNAP;              // allocated snapshot id
  uint64_t m_size;                               // image size captured for the snap
  ParentImageInfo m_parent_info;                 // parent spec captured for the snap

  void send_notify_quiesce();
  Context *handle_notify_quiesce(int *result);

  void send_suspend_requests();
  Context *handle_suspend_requests(int *result);

  void send_suspend_aio();
  Context *handle_suspend_aio(int *result);

  void send_append_op_event();
  Context *handle_append_op_event(int *result);

  void send_allocate_snap_id();
  Context *handle_allocate_snap_id(int *result);

  void send_create_snap();
  Context *handle_create_snap(int *result);

  Context *send_create_object_map();
  Context *handle_create_object_map(int *result);

  Context *send_create_image_state();
  Context *handle_create_image_state(int *result);

  void send_release_snap_id();
  Context *handle_release_snap_id(int *result);

  Context *send_notify_unquiesce();
  Context *handle_notify_unquiesce(int *result);

  void update_snap_context();

  // Latch the first failure; later successes must not overwrite it.
  void save_result(int *result) {
    if (m_ret_val == 0 && *result < 0) {
      m_ret_val = *result;
    }
  }
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::SnapshotCreateRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_SNAPSHOT_CREATE_REQUEST_H
| 5,112 | 33.315436 | 81 | h |
null | ceph-main/src/librbd/operation/SnapshotLimitRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_SNAPSHOT_LIMIT_REQUEST_H
#define CEPH_LIBRBD_OPERATION_SNAPSHOT_LIMIT_REQUEST_H
#include "librbd/operation/Request.h"
#include <iosfwd>
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace operation {
// Journaled request that sets the maximum number of snapshots permitted on
// an image.
template <typename ImageCtxT = ImageCtx>
class SnapshotLimitRequest : public Request<ImageCtxT> {
public:
  SnapshotLimitRequest(ImageCtxT &image_ctx, Context *on_finish,
                       uint64_t limit);

protected:
  void send_op() override;
  bool should_complete(int r) override;

  // Journal event recording the new snapshot limit.
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::SnapLimitEvent(op_tid, m_snap_limit);
  }

private:
  uint64_t m_snap_limit;   // maximum snapshot count to persist

  void send_limit_snaps();
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::SnapshotLimitRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_SNAPSHOT_LIMIT_REQUEST_H
| 1,018 | 21.644444 | 80 | h |
null | ceph-main/src/librbd/operation/SnapshotProtectRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_SNAPSHOT_PROTECT_REQUEST_H
#define CEPH_LIBRBD_OPERATION_SNAPSHOT_PROTECT_REQUEST_H
#include "librbd/operation/Request.h"
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace operation {
// Journaled request that marks a snapshot as protected (i.e. not removable
// while clones depend on it).
template <typename ImageCtxT = ImageCtx>
class SnapshotProtectRequest : public Request<ImageCtxT> {
public:
  /**
   * Snap Protect goes through the following state machine:
   *
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * STATE_PROTECT_SNAP
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   *
   */
  enum State {
    STATE_PROTECT_SNAP
  };

  SnapshotProtectRequest(ImageCtxT &image_ctx, Context *on_finish,
                         const cls::rbd::SnapshotNamespace &snap_namespace,
                         const std::string &snap_name);

protected:
  void send_op() override;
  bool should_complete(int r) override;

  // Journal event recording the protect of the named snapshot.
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::SnapProtectEvent(op_tid, m_snap_namespace, m_snap_name);
  }

private:
  cls::rbd::SnapshotNamespace m_snap_namespace;  // namespace of the snapshot
  std::string m_snap_name;                       // snapshot to protect
  State m_state;

  void send_protect_snap();

  int verify_and_send_protect_snap();
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::SnapshotProtectRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_SNAPSHOT_PROTECT_REQUEST_H
| 1,469 | 20.304348 | 82 | h |
null | ceph-main/src/librbd/operation/SnapshotRemoveRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_SNAPSHOT_REMOVE_REQUEST_H
#define CEPH_LIBRBD_OPERATION_SNAPSHOT_REMOVE_REQUEST_H
#include "librbd/operation/Request.h"
#include "include/buffer.h"
#include "librbd/Types.h"
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace operation {
// Journaled request that removes a snapshot, detaching child images,
// cleaning up object map / mirror state, and releasing the snap id.
template <typename ImageCtxT = ImageCtx>
class SnapshotRemoveRequest : public Request<ImageCtxT> {
public:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * TRASH_SNAP
   *    |
   *    v (skip if unsupported)
   * GET_SNAP
   *    |
   *    v (skip if unnecessary)
   * LIST_CHILDREN <-------------\
   *    |                        |
   *    v (skip if unnecessary)  | (repeat as needed)
   * DETACH_STALE_CHILD ---------/
   *    |
   *    v (skip if unnecessary)
   * DETACH_CHILD
   *    |
   *    v (skip if disabled/in-use)
   * REMOVE_OBJECT_MAP
   *    |
   *    v (skip if not mirror snapshot)
   * REMOVE_IMAGE_STATE
   *    |
   *    v (skip if in-use)
   * RELEASE_SNAP_ID
   *    |
   *    v (skip if in-use)
   * REMOVE_SNAP
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */
  // Factory helper; the request destroys itself on completion.
  static SnapshotRemoveRequest *create(
      ImageCtxT &image_ctx, const cls::rbd::SnapshotNamespace &snap_namespace,
      const std::string &snap_name, uint64_t snap_id, Context *on_finish) {
    return new SnapshotRemoveRequest(image_ctx, on_finish, snap_namespace,
                                     snap_name, snap_id);
  }

  SnapshotRemoveRequest(ImageCtxT &image_ctx, Context *on_finish,
                        const cls::rbd::SnapshotNamespace &snap_namespace,
                        const std::string &snap_name,
                        uint64_t snap_id);

protected:
  void send_op() override;
  bool should_complete(int r) override;

  // Journal event recording the removal of the named snapshot.
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::SnapRemoveEvent(op_tid, m_snap_namespace, m_snap_name);
  }

private:
  cls::rbd::SnapshotNamespace m_snap_namespace;  // namespace of the snapshot
  cls::rbd::ChildImageSpecs m_child_images;      // children discovered via LIST_CHILDREN
  std::string m_snap_name;                       // snapshot to remove
  uint64_t m_snap_id;                            // id of the snapshot to remove
  bool m_trashed_snapshot = false;               // snap was moved to trash namespace
  bool m_child_attached = false;                 // a child is still attached to the snap

  ceph::bufferlist m_out_bl;                     // scratch buffer for cls responses

  void trash_snap();
  void handle_trash_snap(int r);

  void get_snap();
  void handle_get_snap(int r);

  void list_children();
  void handle_list_children(int r);

  void detach_stale_child();
  void handle_detach_stale_child(int r);

  void detach_child();
  void handle_detach_child(int r);

  void remove_object_map();
  void handle_remove_object_map(int r);

  void remove_image_state();
  void handle_remove_image_state(int r);

  void release_snap_id();
  void handle_release_snap_id(int r);

  void remove_snap();
  void handle_remove_snap(int r);

  void remove_snap_context();
  int scan_for_parents(cls::rbd::ParentImageSpec &pspec);
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::SnapshotRemoveRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_SNAPSHOT_REMOVE_REQUEST_H
| 3,047 | 22.627907 | 81 | h |
null | ceph-main/src/librbd/operation/SnapshotRollbackRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_SNAPSHOT_ROLLBACK_REQUEST_H
#define CEPH_LIBRBD_OPERATION_SNAPSHOT_ROLLBACK_REQUEST_H
#include "librbd/operation/Request.h"
#include "librbd/ImageCtx.h"
#include "librbd/internal.h"
#include "librbd/journal/Types.h"
#include <string>
class Context;
namespace librbd {
class ProgressContext;
namespace operation {
// Journaled request that rolls an image back to a snapshot: blocks writes,
// resizes if needed, rolls back objects and object map, then invalidates
// the cache.
template <typename ImageCtxT = ImageCtx>
class SnapshotRollbackRequest : public Request<ImageCtxT> {
public:
  /**
   * Snap Rollback goes through the following state machine:
   *
   * @verbatim
   *
   * <start> ---------\
   *                  |
   *                  v
   * STATE_BLOCK_WRITES
   *    |
   *    v
   * STATE_RESIZE_IMAGE (skip if resize not
   *    |                required)
   *    v
   * STATE_GET_SNAP_OBJECT_MAP (skip if object
   *    |                       map disabled)
   *    v
   * STATE_ROLLBACK_OBJECT_MAP (skip if object
   *    |                       map disabled)
   *    v
   * STATE_ROLLBACK_OBJECTS
   *    |
   *    v
   * STATE_REFRESH_OBJECT_MAP (skip if object
   *    |                      map disabled)
   *    v
   * STATE_INVALIDATE_CACHE (skip if cache
   *    |                    disabled)
   *    v
   * <finish>
   *
   * @endverbatim
   *
   * The _RESIZE_IMAGE state is skipped if the image doesn't need to be resized.
   * The _ROLLBACK_OBJECT_MAP state is skipped if the object map isn't enabled.
   * The _INVALIDATE_CACHE state is skipped if the cache isn't enabled.
   */
  SnapshotRollbackRequest(ImageCtxT &image_ctx, Context *on_finish,
                          const cls::rbd::SnapshotNamespace &snap_namespace,
                          const std::string &snap_name,
                          uint64_t snap_id,
                          uint64_t snap_size, ProgressContext &prog_ctx);
  ~SnapshotRollbackRequest() override;

protected:
  void send_op() override;
  bool should_complete(int r) override {
    return true;
  }

  // Journal event recording the rollback to the named snapshot.
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::SnapRollbackEvent(op_tid, m_snap_namespace, m_snap_name);
  }

private:
  cls::rbd::SnapshotNamespace m_snap_namespace;  // namespace of the target snap
  std::string m_snap_name;                       // snapshot to roll back to
  uint64_t m_snap_id;                            // id of the target snapshot
  uint64_t m_snap_size;                          // image size at the snapshot
  uint64_t m_head_num_objects;                   // object count of the HEAD image
  ProgressContext &m_prog_ctx;                   // progress feedback

  NoOpProgressContext m_no_op_prog_ctx;          // for sub-requests without progress
  bool m_blocking_writes = false;                // writes blocked (for cleanup)
  decltype(ImageCtxT::object_map) m_object_map;       // HEAD object map handle
  decltype(ImageCtxT::object_map) m_snap_object_map;  // snapshot object map handle

  void send_block_writes();
  Context *handle_block_writes(int *result);

  void send_resize_image();
  Context *handle_resize_image(int *result);

  void send_get_snap_object_map();
  Context *handle_get_snap_object_map(int *result);

  void send_rollback_object_map();
  Context *handle_rollback_object_map(int *result);

  void send_rollback_objects();
  Context *handle_rollback_objects(int *result);

  Context *send_refresh_object_map();
  Context *handle_refresh_object_map(int *result);

  Context *send_invalidate_cache();
  Context *handle_invalidate_cache(int *result);

  void apply();
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::SnapshotRollbackRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_SNAPSHOT_ROLLBACK_REQUEST_H
| 3,587 | 28.170732 | 83 | h |
null | ceph-main/src/librbd/operation/SnapshotUnprotectRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_SNAPSHOT_UNPROTECT_REQUEST_H
#define CEPH_LIBRBD_OPERATION_SNAPSHOT_UNPROTECT_REQUEST_H
#include "librbd/operation/Request.h"
#include <string>
class Context;
namespace librbd {
class ImageCtx;
namespace operation {
// Journaled request that unprotects a snapshot after verifying no clones
// in any pool still depend on it; rolls back the in-progress flag on error.
template <typename ImageCtxT = ImageCtx>
class SnapshotUnprotectRequest : public Request<ImageCtxT> {
public:
  /**
   * Snap Unprotect goes through the following state machine:
   *
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * STATE_UNPROTECT_SNAP_START
   *    |
   *    v
   * STATE_SCAN_POOL_CHILDREN * * * * > STATE_UNPROTECT_SNAP_ROLLBACK
   *    |                                         |
   *    v                                         |
   * STATE_UNPROTECT_SNAP_FINISH                  |
   *    |                                         |
   *    v                                         |
   * <finish> <----------------------------------/
   *
   * @endverbatim
   *
   * If the unprotect operation needs to abort, the error path is followed
   * to rollback the unprotect in-progress status on the image.
   */
  enum State {
    STATE_UNPROTECT_SNAP_START,
    STATE_SCAN_POOL_CHILDREN,
    STATE_UNPROTECT_SNAP_FINISH,
    STATE_UNPROTECT_SNAP_ROLLBACK
  };

  SnapshotUnprotectRequest(ImageCtxT &image_ctx, Context *on_finish,
                           const cls::rbd::SnapshotNamespace &snap_namespace,
                           const std::string &snap_name);

protected:
  void send_op() override;
  bool should_complete(int r) override;

  // Surface the original failure (m_ret_val) even if the rollback that
  // followed it succeeded.
  int filter_return_code(int r) const override {
    if (m_ret_val < 0) {
      return m_ret_val;
    }
    return 0;
  }

  // Journal event recording the unprotect of the named snapshot.
  journal::Event create_event(uint64_t op_tid) const override {
    return journal::SnapUnprotectEvent(op_tid, m_snap_namespace, m_snap_name);
  }

private:
  cls::rbd::SnapshotNamespace m_snap_namespace;  // namespace of the snapshot
  std::string m_snap_name;                       // snapshot to unprotect
  State m_state;

  int m_ret_val;         // first error encountered (reported via filter_return_code)
  uint64_t m_snap_id;    // id resolved from the snapshot name

  bool should_complete_error();

  void send_unprotect_snap_start();
  void send_scan_pool_children();
  void send_unprotect_snap_finish();
  void send_unprotect_snap_rollback();

  int verify_and_send_unprotect_snap_start();
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::SnapshotUnprotectRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_SNAPSHOT_UNPROTECT_REQUEST_H
| 2,390 | 24.168421 | 84 | h |
null | ceph-main/src/librbd/operation/SparsifyRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_SPARSIFY_REQUEST_H
#define CEPH_LIBRBD_OPERATION_SPARSIFY_REQUEST_H
#include "librbd/operation/Request.h"
#include "common/snap_types.h"
namespace librbd {
class ImageCtx;
class ProgressContext;
namespace operation {
// Request that reclaims space by punching out all-zero extents (of at least
// m_sparse_size granularity) across the image's objects.  Not journaled:
// create_event() aborts if invoked.
template <typename ImageCtxT = ImageCtx>
class SparsifyRequest : public Request<ImageCtxT>
{
public:
  SparsifyRequest(ImageCtxT &image_ctx, size_t sparse_size, Context *on_finish,
                  ProgressContext &prog_ctx)
    : Request<ImageCtxT>(image_ctx, on_finish), m_sparse_size(sparse_size),
      m_prog_ctx(prog_ctx) {
  }

protected:
  void send_op() override;
  bool should_complete(int r) override;
  bool can_affect_io() const override {
    return true;
  }

  // Sparsify is never journaled; reaching this is a logic error.
  journal::Event create_event(uint64_t op_tid) const override {
    ceph_abort();
    return journal::UnknownEvent();
  }

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * SPARSIFY OBJECTS
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  size_t m_sparse_size;          // minimum zeroed-extent size to deallocate
  ProgressContext &m_prog_ctx;   // progress feedback for the object scan

  void sparsify_objects();
  void handle_sparsify_objects(int r);
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::SparsifyRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_SPARSIFY_REQUEST_H
| 1,404 | 20.615385 | 79 | h |
null | ceph-main/src/librbd/operation/TrimRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATION_TRIM_REQUEST_H
#define CEPH_LIBRBD_OPERATION_TRIM_REQUEST_H
#include "librbd/AsyncRequest.h"
namespace librbd
{
class ImageCtx;
class ProgressContext;
namespace operation {
template <typename ImageCtxT = ImageCtx>
class TrimRequest : public AsyncRequest<ImageCtxT>
{
public:
static TrimRequest *create(ImageCtxT &image_ctx, Context *on_finish,
uint64_t original_size, uint64_t new_size,
ProgressContext &prog_ctx) {
return new TrimRequest(image_ctx, on_finish, original_size, new_size,
prog_ctx);
}
TrimRequest(ImageCtxT &image_ctx, Context *on_finish,
uint64_t original_size, uint64_t new_size,
ProgressContext &prog_ctx);
void send() override;
protected:
/**
* Trim goes through the following state machine to remove whole objects,
* clean partially trimmed objects, and update the object map:
*
* @verbatim
*
* <start> . . . . . . . . . . . . . . . . .
* | .
* v (skip if not needed) .
* STATE_PRE_TRIM .
* | .
* v (skip if not needed) .
* STATE_COPYUP_OBJECTS .
* | .
* v (skip if not needed) .
* STATE_REMOVE_OBJECTS .
* | .
* v (skip if not needed) .
* STATE_POST_TRIM .
* | .
* v (skip if not needed) .
* STATE_CLEAN_BOUNDARY .
* | .
* v .
* STATE_FINISHED < . . . . . . . . . . . . . . .
* |
* v
* <finish>
*
* The _COPYUP_OBJECTS state is skipped if there is no parent overlap
* within the new image size and the image does not have any snapshots.
* The _PRE_TRIM/_POST_TRIM states are skipped if the object map
* isn't enabled. The _REMOVE_OBJECTS state is skipped if no whole objects
* are removed. The _CLEAN_BOUNDARY state is skipped if no boundary
* objects are cleaned. The state machine will immediately transition
* to _FINISHED state if there are no bytes to trim.
*/
enum State {
STATE_PRE_TRIM,
STATE_COPYUP_OBJECTS,
STATE_REMOVE_OBJECTS,
STATE_POST_TRIM,
STATE_CLEAN_BOUNDARY,
STATE_FINISHED
};
bool should_complete(int r) override;
State m_state = STATE_PRE_TRIM;
private:
uint64_t m_delete_start;
uint64_t m_delete_start_min = 0;
uint64_t m_num_objects;
uint64_t m_delete_off;
uint64_t m_new_size;
ProgressContext &m_prog_ctx;
void send_pre_trim();
void send_copyup_objects();
void send_remove_objects();
void send_post_trim();
void send_clean_boundary();
void send_finish(int r);
};
} // namespace operation
} // namespace librbd
extern template class librbd::operation::TrimRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATION_TRIM_REQUEST_H
| 3,361 | 30.12963 | 76 | h |
null | ceph-main/src/librbd/plugin/Api.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_PLUGIN_API_H
#define CEPH_LIBRBD_PLUGIN_API_H
#include "common/Timer.h"
#include "common/ceph_mutex.h"
#include "include/common_fwd.h"
#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include "librbd/io/Types.h"
#include "librbd/io/ReadResult.h"
namespace ZTracer { struct Trace; }
namespace librbd {
namespace io {
class AioCompletion;
class C_AioRequest;
}
struct ImageCtx;
namespace plugin {
/**
 * Facade of core librbd functionality handed to plugins. All entry
 * points are virtual so tests (and plugins) can substitute mocks.
 */
template <typename ImageCtxT>
struct Api {
  using Extents = librbd::io::Extents;
  Api() {}
  virtual ~Api() {}
  // read extents of an object from the image's parent at the given snap
  virtual void read_parent(
      ImageCtxT *image_ctx, uint64_t object_no, io::ReadExtents* extents,
      librados::snap_t snap_id, const ZTracer::Trace &trace,
      Context* on_finish);
  // asynchronously set an image-metadata key/value pair
  virtual void execute_image_metadata_set(
      ImageCtxT *image_ctx,
      const std::string &key,
      const std::string &value,
      Context *on_finish);
  // asynchronously remove an image-metadata key
  virtual void execute_image_metadata_remove(
      ImageCtxT *image_ctx,
      const std::string &key,
      Context *on_finish);
  // fetch the shared per-CephContext timer and its guarding lock
  virtual void get_image_timer_instance(
      CephContext *cct, SafeTimer **timer,
      ceph::mutex **timer_lock);
  // true if all bits in 'features' are enabled on the image
  virtual bool test_image_features(
      ImageCtxT *image_ctx,
      uint64_t features);
  // wire a read result + extents into an AioCompletion expecting
  // 'request_count' sub-requests
  virtual void update_aio_comp(
      io::AioCompletion* aio_comp,
      uint32_t request_count,
      io::ReadResult& read_result,
      io::Extents &image_extents);
  virtual void update_aio_comp(
      io::AioCompletion* aio_comp,
      uint32_t request_count);
  virtual io::ReadResult::C_ImageReadRequest* create_image_read_request(
      io::AioCompletion* aio_comp, uint64_t buffer_offset,
      const Extents& image_extents);
  virtual io::C_AioRequest* create_aio_request(io::AioCompletion* aio_comp);
private:
  // internal helper shared by the update_aio_comp() overloads
  void start_in_flight_io(io::AioCompletion* aio_comp);
};
} // namespace plugin
} // namespace librbd
extern template class librbd::plugin::Api<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_PLUGIN_API_H
| 2,076 | 23.435294 | 76 | h |
null | ceph-main/src/librbd/plugin/ParentCache.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_PLUGIN_PARENT_CACHE_H
#define CEPH_LIBRBD_PLUGIN_PARENT_CACHE_H
#include "librbd/plugin/Types.h"
#include "include/Context.h"
namespace librbd {
struct ImageCtx;
namespace plugin {
/**
 * Plugin that attaches the parent-image cache to an image. init() is the
 * single plugin entry point (see plugin::Interface).
 */
template <typename ImageCtxT>
class ParentCache : public Interface<ImageCtxT> {
public:
  ParentCache(CephContext* cct) : Interface<ImageCtxT>(cct) {
  }
  void init(ImageCtxT* image_ctx, Api<ImageCtxT>& api,
            cache::ImageWritebackInterface& image_writeback,
            PluginHookPoints& hook_points_list,
            Context* on_finish) override;
private:
  // completion callback for the asynchronous cache initialization
  void handle_init_parent_cache(int r, Context* on_finish);
  using ceph::Plugin::cct;
};
} // namespace plugin
} // namespace librbd
extern template class librbd::plugin::ParentCache<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_PLUGIN_PARENT_CACHE_H
| 927 | 22.794872 | 70 | h |
null | ceph-main/src/librbd/plugin/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_PLUGIN_TYPES_H
#define CEPH_LIBRBD_PLUGIN_TYPES_H
#include "include/common_fwd.h"
#include "include/Context.h"
#include "common/PluginRegistry.h"
#include "librbd/cache/ImageWriteback.h"
namespace librbd {
namespace plugin {
template <typename> struct Api;
// Callbacks a plugin registers to participate in image lifecycle
// events; discard() defaults to completing immediately with success.
struct HookPoints {
  virtual ~HookPoints() {
  }
  virtual void acquired_exclusive_lock(Context* on_finish) = 0;
  virtual void prerelease_exclusive_lock(Context* on_finish) = 0;
  virtual void discard(Context* on_finish) {
    on_finish->complete(0);
  }
};
// hook sets contributed by all loaded plugins, in registration order
typedef std::list<std::unique_ptr<HookPoints>> PluginHookPoints;
// Base class for librbd plugins; concrete plugins implement init() and
// may append their HookPoints to hook_points_list before completing
// on_finish.
template <typename ImageCtxT>
struct Interface : public ceph::Plugin {
  Interface(CephContext* cct) : Plugin(cct) {
  }
  virtual ~Interface() {
  }
  virtual void init(ImageCtxT* image_ctx, Api<ImageCtxT>& api,
                    librbd::cache::ImageWritebackInterface& image_writeback,
                    PluginHookPoints& hook_points_list, Context* on_finish) = 0;
};
} // namespace plugin
} // namespace librbd
#endif // CEPH_LIBRBD_PLUGIN_TYPES_H
| 1,152 | 24.065217 | 80 | h |
null | ceph-main/src/librbd/plugin/WriteLogImageCache.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_PLUGIN_WRITELOG_IMAGE_CACHE_H
#define CEPH_LIBRBD_PLUGIN_WRITELOG_IMAGE_CACHE_H
#include "librbd/plugin/Types.h"
#include "include/Context.h"
namespace librbd {
struct ImageCtx;
namespace plugin {
/**
 * Plugin wiring the persistent write-log (PWL) image cache into an
 * image; its nested HookPoints manage the cache across exclusive-lock
 * transitions.
 */
template <typename ImageCtxT>
class WriteLogImageCache : public Interface<ImageCtxT> {
public:
  WriteLogImageCache(CephContext* cct) : Interface<ImageCtxT>(cct) {
  }
  ~WriteLogImageCache() override;
  void init(ImageCtxT* image_ctx, Api<ImageCtxT>& api,
            cache::ImageWritebackInterface& image_writeback,
            PluginHookPoints& hook_points_list,
            Context* on_finish) override;
  // lifecycle hooks registered by init(); invoked on lock transitions
  class HookPoints : public plugin::HookPoints {
  public:
    HookPoints(ImageCtxT* image_ctx,
               cache::ImageWritebackInterface& image_writeback,
               plugin::Api<ImageCtxT>& plugin_api);
    ~HookPoints() override;
    void acquired_exclusive_lock(Context* on_finish) override;
    void prerelease_exclusive_lock(Context* on_finish) override;
    void discard(Context* on_finish) override;
  private:
    ImageCtxT* m_image_ctx;
    cache::ImageWritebackInterface& m_image_writeback;
    plugin::Api<ImageCtxT>& m_plugin_api;
  };
};
} // namespace plugin
} // namespace librbd
extern template class librbd::plugin::WriteLogImageCache<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_PLUGIN_WRITELOG_IMAGE_CACHE_H
| 1,455 | 25.962963 | 75 | h |
null | ceph-main/src/librbd/trash/MoveRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_TRASH_MOVE_REQUEST_H
#define CEPH_LIBRBD_TRASH_MOVE_REQUEST_H
#include "include/common_fwd.h"
#include "include/utime.h"
#include "include/rados/librados.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include <string>
struct Context;
namespace librbd {
struct ImageCtx;
namespace trash {
/**
 * Moves an image into the pool's trash: records the trash entry, then
 * removes the image's id object and its rbd_directory listing. Runs the
 * state machine sketched below and fires on_finish when done.
 */
template <typename ImageCtxT = librbd::ImageCtx>
class MoveRequest {
public:
  // factory helper; the request deletes itself when finished
  static MoveRequest* create(librados::IoCtx& io_ctx,
                             const std::string& image_id,
                             const cls::rbd::TrashImageSpec& trash_image_spec,
                             Context* on_finish) {
    return new MoveRequest(io_ctx, image_id, trash_image_spec, on_finish);
  }
  MoveRequest(librados::IoCtx& io_ctx, const std::string& image_id,
              const cls::rbd::TrashImageSpec& trash_image_spec,
              Context* on_finish)
    : m_io_ctx(io_ctx), m_image_id(image_id),
      m_trash_image_spec(trash_image_spec), m_on_finish(on_finish),
      m_cct(reinterpret_cast<CephContext *>(io_ctx.cct())) {
  }
  void send();
private:
  /*
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * TRASH_ADD
   *    |
   *    v
   * REMOVE_ID
   *    |
   *    v
   * DIRECTORY_REMOVE
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */
  librados::IoCtx &m_io_ctx;
  std::string m_image_id;
  cls::rbd::TrashImageSpec m_trash_image_spec;  // trash entry to record
  Context *m_on_finish;
  CephContext *m_cct;
  // one step per state above, each with its completion handler
  void trash_add();
  void handle_trash_add(int r);
  void remove_id();
  void handle_remove_id(int r);
  void directory_remove();
  void handle_directory_remove(int r);
  void finish(int r);
};
} // namespace trash
} // namespace librbd
extern template class librbd::trash::MoveRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_TRASH_MOVE_REQUEST_H
| 1,897 | 20.568182 | 78 | h |
null | ceph-main/src/librbd/trash/RemoveRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_TRASH_REMOVE_REQUEST_H
#define CEPH_LIBRBD_TRASH_REMOVE_REQUEST_H
#include "include/common_fwd.h"
#include "include/utime.h"
#include "include/rados/librados.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include <string>
class Context;
namespace librbd {
struct ImageCtx;
class ProgressContext;
namespace asio { struct ContextWQ; }
namespace trash {
/**
 * Permanently deletes an image that already lives in the trash: marks
 * the trash entry REMOVING, removes the image data, then drops the
 * trash entry. On failure the entry state is rolled back to NORMAL.
 */
template <typename ImageCtxT = librbd::ImageCtx>
class RemoveRequest {
public:
  // create from an image id (image not open)
  static RemoveRequest* create(librados::IoCtx &io_ctx,
                               const std::string &image_id,
                               asio::ContextWQ *op_work_queue, bool force,
                               ProgressContext &prog_ctx, Context *on_finish) {
    return new RemoveRequest(io_ctx, image_id, op_work_queue, force, prog_ctx,
                             on_finish);
  }
  // create from an already-open image context (closed by the machine)
  static RemoveRequest* create(librados::IoCtx &io_ctx, ImageCtxT *image_ctx,
                               asio::ContextWQ *op_work_queue, bool force,
                               ProgressContext &prog_ctx, Context *on_finish) {
    return new RemoveRequest(io_ctx, image_ctx, op_work_queue, force, prog_ctx,
                             on_finish);
  }
  RemoveRequest(librados::IoCtx &io_ctx, const std::string &image_id,
                asio::ContextWQ *op_work_queue, bool force,
                ProgressContext &prog_ctx, Context *on_finish)
    : m_io_ctx(io_ctx), m_image_id(image_id), m_op_work_queue(op_work_queue),
      m_force(force), m_prog_ctx(prog_ctx), m_on_finish(on_finish),
      m_cct(reinterpret_cast<CephContext *>(io_ctx.cct())) {
  }
  RemoveRequest(librados::IoCtx &io_ctx, ImageCtxT *image_ctx,
                asio::ContextWQ *op_work_queue, bool force,
                ProgressContext &prog_ctx, Context *on_finish)
    : m_io_ctx(io_ctx), m_image_ctx(image_ctx), m_image_id(m_image_ctx->id),
      m_op_work_queue(op_work_queue), m_force(force), m_prog_ctx(prog_ctx),
      m_on_finish(on_finish),
      m_cct(reinterpret_cast<CephContext *>(io_ctx.cct())) {
  }
  void send();
private:
  /*
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * SET_STATE (removing) * * * * * * *> CLOSE_IMAGE
   *    |                                    |
   *    v                                    |
   * REMOVE_IMAGE * * *> SET_STATE (normal)  |
   *    |                   |                |
   *    v                   |                |
   * REMOVE_TRASH_ENTRY     |                |
   *    |                   |                |
   *    v                   |                |
   * <finish> <-------------/<---------------/
   *
   * @endverbatim
   */
  librados::IoCtx &m_io_ctx;
  ImageCtxT *m_image_ctx = nullptr;  // non-null when opened by the caller
  std::string m_image_id;
  asio::ContextWQ *m_op_work_queue;
  bool m_force;                      // proceed despite some errors
  ProgressContext &m_prog_ctx;
  Context *m_on_finish;
  CephContext *m_cct;
  // state transition being requested and the state it must replace;
  // flipped to NORMAL/REMOVING when rolling back after a failure
  cls::rbd::TrashImageState m_trash_set_state =
    cls::rbd::TRASH_IMAGE_STATE_REMOVING;
  cls::rbd::TrashImageState m_trash_expect_state =
    cls::rbd::TRASH_IMAGE_STATE_NORMAL;
  int m_ret_val = 0;                 // first error seen, reported at finish
  void set_state();
  void handle_set_state(int r);
  void close_image();
  void handle_close_image(int r);
  void remove_image();
  void handle_remove_image(int r);
  void remove_trash_entry();
  void handle_remove_trash_entry(int r);
  void finish(int r);
};
} // namespace trash
} // namespace librbd
extern template class librbd::trash::RemoveRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_TRASH_REMOVE_REQUEST_H
| 3,589 | 29.168067 | 79 | h |
null | ceph-main/src/librbd/trash_watcher/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_TRASH_WATCHER_TYPES_H
#define CEPH_LIBRBD_TRASH_WATCHER_TYPES_H
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/encoding.h"
#include "cls/rbd/cls_rbd_types.h"
#include <iosfwd>
#include <list>
#include <string>
#include <boost/variant.hpp>
namespace librbd {
namespace trash_watcher {
// Opcode tag written ahead of each encoded payload on the wire.
enum NotifyOp {
  NOTIFY_OP_IMAGE_ADDED = 0,
  NOTIFY_OP_IMAGE_REMOVED = 1
};
// Notification payload: an image entered the trash.
struct ImageAddedPayload {
  static const NotifyOp NOTIFY_OP = NOTIFY_OP_IMAGE_ADDED;
  std::string image_id;
  cls::rbd::TrashImageSpec trash_image_spec;
  ImageAddedPayload() {
  }
  ImageAddedPayload(const std::string& image_id,
                    const cls::rbd::TrashImageSpec& trash_image_spec)
    : image_id(image_id), trash_image_spec(trash_image_spec) {
  }
  void encode(bufferlist &bl) const;
  void decode(__u8 version, bufferlist::const_iterator &iter);
  void dump(Formatter *f) const;
};
// Notification payload: an image left the trash.
struct ImageRemovedPayload {
  static const NotifyOp NOTIFY_OP = NOTIFY_OP_IMAGE_REMOVED;
  std::string image_id;
  ImageRemovedPayload() {
  }
  ImageRemovedPayload(const std::string& image_id)
    : image_id(image_id) {
  }
  void encode(bufferlist &bl) const;
  void decode(__u8 version, bufferlist::const_iterator &iter);
  void dump(Formatter *f) const;
};
// Catch-all alternative for unrecognized opcodes; NOTIFY_OP is a -1
// sentinel that never appears on the wire.
struct UnknownPayload {
  static const NotifyOp NOTIFY_OP = static_cast<NotifyOp>(-1);
  UnknownPayload() {
  }
  void encode(bufferlist &bl) const;
  void decode(__u8 version, bufferlist::const_iterator &iter);
  void dump(Formatter *f) const;
};
typedef boost::variant<ImageAddedPayload,
                       ImageRemovedPayload,
                       UnknownPayload> Payload;
// Envelope carrying a single trash-watcher notification; defaults to
// UnknownPayload until decoded.
struct NotifyMessage {
  NotifyMessage(const Payload &payload = UnknownPayload()) : payload(payload) {
  }
  Payload payload;
  void encode(bufferlist& bl) const;
  void decode(bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
  static void generate_test_instances(std::list<NotifyMessage *> &o);
};
WRITE_CLASS_ENCODER(NotifyMessage);
std::ostream &operator<<(std::ostream &out, const NotifyOp &op);
} // namespace trash_watcher
} // namespace librbd
using librbd::trash_watcher::encode;
using librbd::trash_watcher::decode;
#endif // CEPH_LIBRBD_TRASH_WATCHER_TYPES_H
| 2,375 | 23.244898 | 79 | h |
null | ceph-main/src/librbd/watcher/Notifier.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_WATCHER_NOTIFIER_H
#define CEPH_LIBRBD_WATCHER_NOTIFIER_H
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "common/ceph_mutex.h"
#include <list>
namespace librbd {
namespace asio { struct ContextWQ; }
namespace watcher {
struct NotifyResponse;
/**
 * Sends watch/notify messages against a single RADOS object and tracks
 * the in-flight notifications so callers can flush (wait for) them.
 */
class Notifier {
public:
  static const uint64_t NOTIFY_TIMEOUT;
  Notifier(asio::ContextWQ *work_queue, librados::IoCtx &ioctx,
           const std::string &oid);
  ~Notifier();
  // complete on_finish once all currently pending notifies have finished
  void flush(Context *on_finish);
  // send bl as a notification; optional response collects peer acks
  void notify(bufferlist &bl, NotifyResponse *response, Context *on_finish);
private:
  typedef std::list<Context*> Contexts;
  // per-notify completion: owns the raw reply buffer until decoded
  struct C_AioNotify : public Context {
    Notifier *notifier;
    NotifyResponse *response;
    Context *on_finish;
    bufferlist out_bl;
    C_AioNotify(Notifier *notifier, NotifyResponse *response,
                Context *on_finish);
    void finish(int r) override;
  };
  asio::ContextWQ *m_work_queue;
  librados::IoCtx &m_ioctx;
  CephContext *m_cct;
  std::string m_oid;                       // notification target object
  ceph::mutex m_aio_notify_lock;           // guards the two fields below
  size_t m_pending_aio_notifies = 0;
  Contexts m_aio_notify_flush_ctxs;        // flush waiters
  void handle_notify(int r, Context *on_finish);
};
} // namespace watcher
} // namespace librbd
#endif // CEPH_LIBRBD_WATCHER_NOTIFIER_H
| 1,437 | 21.123077 | 76 | h |
null | ceph-main/src/librbd/watcher/RewatchRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_WATCHER_REWATCH_REQUEST_H
#define CEPH_LIBRBD_WATCHER_REWATCH_REQUEST_H
#include "common/ceph_mutex.h"
#include "include/int_types.h"
#include "include/rados/librados.hpp"
struct Context;
namespace librbd {
namespace watcher {
/**
 * Re-establishes a RADOS watch after it was lost: unwatches the stale
 * handle, then registers a new watch, retrying on recoverable errors.
 * On success *watch_handle is updated under watch_lock.
 */
class RewatchRequest {
public:
  // factory helper; the request deletes itself when finished
  static RewatchRequest *create(librados::IoCtx& ioctx, const std::string& oid,
                                ceph::shared_mutex &watch_lock,
                                librados::WatchCtx2 *watch_ctx,
                                uint64_t *watch_handle, Context *on_finish) {
    return new RewatchRequest(ioctx, oid, watch_lock, watch_ctx, watch_handle,
                              on_finish);
  }
  RewatchRequest(librados::IoCtx& ioctx, const std::string& oid,
                 ceph::shared_mutex &watch_lock, librados::WatchCtx2 *watch_ctx,
                 uint64_t *watch_handle, Context *on_finish);
  void send();
private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * UNWATCH
   *    |
   *    |  . . . .
   *    |  .     . (recoverable error)
   *    v  v     .
   * REWATCH . . .
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */
  librados::IoCtx& m_ioctx;
  std::string m_oid;                   // object being watched
  ceph::shared_mutex &m_watch_lock;    // guards *m_watch_handle
  librados::WatchCtx2 *m_watch_ctx;
  uint64_t *m_watch_handle;            // caller's handle, updated on success
  Context *m_on_finish;
  uint64_t m_rewatch_handle = 0;       // new handle until handed to caller
  void unwatch();
  void handle_unwatch(int r);
  void rewatch();
  void handle_rewatch(int r);
  void finish(int r);
};
} // namespace watcher
} // namespace librbd
#endif // CEPH_LIBRBD_WATCHER_REWATCH_REQUEST_H
| 1,697 | 21.342105 | 80 | h |
null | ceph-main/src/librbd/watcher/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_WATCHER_TYPES_H
#define CEPH_LIBRBD_WATCHER_TYPES_H
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/encoding.h"
namespace ceph { class Formatter; }
namespace librbd {
class Watcher;
namespace watcher {
/// Identity of a watcher connection: the client's global id plus its
/// watch handle. A default-constructed (0, 0) value is the "unset"
/// sentinel (see is_valid()).
struct ClientId {
  uint64_t gid;
  uint64_t handle;
  ClientId() : gid(0), handle(0) {}
  ClientId(uint64_t gid, uint64_t handle) : gid(gid), handle(handle) {}
  void encode(bufferlist& bl) const;
  void decode(bufferlist::const_iterator& it);
  void dump(Formatter *f) const;
  /// false only for the default-constructed (0, 0) sentinel
  inline bool is_valid() const {
    return *this != ClientId();
  }
  inline bool operator==(const ClientId &rhs) const {
    return gid == rhs.gid && handle == rhs.handle;
  }
  inline bool operator!=(const ClientId &rhs) const {
    return !(*this == rhs);
  }
  /// lexicographic order: gid first, then handle as the tie-breaker
  inline bool operator<(const ClientId &rhs) const {
    return gid < rhs.gid || (gid == rhs.gid && handle < rhs.handle);
  }
};
// Aggregated result of a notify: per-client ack payloads plus the
// clients that failed to respond before the timeout.
struct NotifyResponse {
  std::map<ClientId, bufferlist> acks;
  std::vector<ClientId> timeouts;
  void encode(bufferlist& bl) const;
  void decode(bufferlist::const_iterator& it);
};
// Maps an image-context type to its watcher type; specialized elsewhere
// (e.g. for test mocks) — defaults to the concrete librbd::Watcher.
template <typename ImageCtxT>
struct Traits {
  typedef librbd::Watcher Watcher;
};
std::ostream &operator<<(std::ostream &out,
                         const ClientId &client);
WRITE_CLASS_ENCODER(ClientId);
WRITE_CLASS_ENCODER(NotifyResponse);
} // namespace watcher
} // namespace librbd
#endif // CEPH_LIBRBD_WATCHER_TYPES_H
| 1,597 | 21.194444 | 71 | h |
null | ceph-main/src/librbd/watcher/Utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_WATCHER_UTILS_H
#define CEPH_LIBRBD_WATCHER_UTILS_H
#include "include/buffer_fwd.h"
#include "include/encoding.h"
#include "include/Context.h"
#include "librbd/Watcher.h"
namespace ceph { class Formatter; }
namespace librbd {
namespace watcher {
namespace util {
// Variant visitor that dispatches a decoded payload to the watcher's
// matching handle_payload() overload. A C_NotifyAck is allocated for the
// ack; if handle_payload() returns true it is completed immediately with
// 0, otherwise the handler presumably keeps ctx and completes it later
// (ownership passes to the handler).
template <typename Watcher>
struct HandlePayloadVisitor : public boost::static_visitor<void> {
  Watcher *watcher;
  uint64_t notify_id;
  uint64_t handle;
  HandlePayloadVisitor(Watcher *watcher_, uint64_t notify_id_,
                       uint64_t handle_)
    : watcher(watcher_), notify_id(notify_id_), handle(handle_)
  {
  }
  template <typename P>
  inline void operator()(const P &payload) const {
    typename Watcher::C_NotifyAck *ctx =
      new typename Watcher::C_NotifyAck(watcher, notify_id, handle);
    if (watcher->handle_payload(payload, ctx)) {
      ctx->complete(0);
    }
  }
};
// Variant visitor that serializes a payload: the payload's NOTIFY_OP tag
// (as u32) followed by the payload body.
class EncodePayloadVisitor : public boost::static_visitor<void> {
public:
  explicit EncodePayloadVisitor(bufferlist &bl) : m_bl(bl) {}
  template <typename P>
  inline void operator()(const P &payload) const {
    using ceph::encode;
    encode(static_cast<uint32_t>(P::NOTIFY_OP), m_bl);
    payload.encode(m_bl);
  }
private:
  bufferlist &m_bl;
};
// Variant visitor that decodes a payload body at the given struct
// version; the opcode is expected to have been consumed already by the
// caller to select the variant alternative.
class DecodePayloadVisitor : public boost::static_visitor<void> {
public:
  DecodePayloadVisitor(__u8 version, bufferlist::const_iterator &iter)
    : m_version(version), m_iter(iter) {}
  template <typename P>
  inline void operator()(P &payload) const {
    payload.decode(m_version, m_iter);
  }
private:
  __u8 m_version;
  bufferlist::const_iterator &m_iter;
};
} // namespace util
} // namespace watcher
} // namespace librbd
#endif // CEPH_LIBRBD_WATCHER_UTILS_H
| 1,804 | 23.066667 | 70 | h |
null | ceph-main/src/log/Entry.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef __CEPH_LOG_ENTRY_H
#define __CEPH_LOG_ENTRY_H
#include "log/LogClock.h"
#include "common/StackStringStream.h"
#include "boost/container/small_vector.hpp"
#include <pthread.h>
#include <string_view>
namespace ceph {
namespace logging {
// Abstract log entry. Captures timestamp, emitting thread and
// priority/subsystem at construction; subclasses supply the message
// text via strv()/size().
class Entry {
public:
  using time = log_time;
  Entry() = delete;
  Entry(short pr, short sub) :
    m_stamp(clock().now()),
    m_thread(pthread_self()),
    m_prio(pr),
    m_subsys(sub)
  {}
  Entry(const Entry &) = default;
  Entry& operator=(const Entry &) = default;
  Entry(Entry &&e) = default;
  Entry& operator=(Entry &&e) = default;
  virtual ~Entry() = default;
  virtual std::string_view strv() const = 0;
  virtual std::size_t size() const = 0;
  time m_stamp;        // when the entry was created
  pthread_t m_thread;  // thread that created it
  short m_prio, m_subsys;
  // process-wide clock shared by all entries (switchable coarse/fine)
  static log_clock& clock() {
    static log_clock clock;
    return clock;
  }
};
/* This should never be moved to the heap! Only allocate this on the stack. See
 * CachedStackStringStream for rationale.
 */
// Entry being composed: exposes an ostream backed by a cached
// stack-based string stream; hence the stack-only restriction above and
// the deleted copy/move operations.
class MutableEntry : public Entry {
public:
  MutableEntry() = delete;
  MutableEntry(short pr, short sub) : Entry(pr, sub) {}
  MutableEntry(const MutableEntry&) = delete;
  MutableEntry& operator=(const MutableEntry&) = delete;
  MutableEntry(MutableEntry&&) = delete;
  MutableEntry& operator=(MutableEntry&&) = delete;
  ~MutableEntry() override = default;
  std::ostream& get_ostream() {
    return *cos;
  }
  std::string_view strv() const override {
    return cos->strv();
  }
  std::size_t size() const override {
    return cos->strv().size();
  }
private:
  CachedStackStringStream cos;
};
// Finished entry whose message is flattened into an inline small_vector
// (first 1024 bytes allocation-free); copyable and movable, so suitable
// for the logger's queues and ring buffer.
class ConcreteEntry : public Entry {
public:
  ConcreteEntry() = delete;
  ConcreteEntry(const Entry& e) : Entry(e) {
    auto strv = e.strv();
    str.reserve(strv.size());
    str.insert(str.end(), strv.begin(), strv.end());
  }
  ConcreteEntry& operator=(const Entry& e) {
    Entry::operator=(e);
    auto strv = e.strv();
    str.reserve(strv.size());
    str.assign(strv.begin(), strv.end());
    return *this;
  }
  ConcreteEntry(ConcreteEntry&& e) noexcept : Entry(e), str(std::move(e.str)) {}
  ConcreteEntry& operator=(ConcreteEntry&& e) {
    Entry::operator=(e);
    str = std::move(e.str);
    return *this;
  }
  ~ConcreteEntry() override = default;
  std::string_view strv() const override {
    return std::string_view(str.data(), str.size());
  }
  std::size_t size() const override {
    return str.size();
  }
private:
  boost::container::small_vector<char, 1024> str;
};
}
}
#endif
| 2,598 | 21.405172 | 80 | h |
null | ceph-main/src/log/Log.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef __CEPH_LOG_LOG_H
#define __CEPH_LOG_LOG_H
#include <boost/circular_buffer.hpp>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <queue>
#include <string>
#include <string_view>
#include "common/Thread.h"
#include "common/likely.h"
#include "log/Entry.h"
#include <unistd.h>
struct uuid_d;
namespace ceph {
namespace logging {
class Graylog;
class JournaldLogger;
class SubsystemMap;
/**
 * Central asynchronous logger. Producers enqueue entries via
 * submit_entry(); a background thread (this class privately inherits
 * Thread and overrides entry()) drains the queue and writes to the
 * configured sinks: log file, stderr, syslog, graylog and journald,
 * each with independent log/crash level thresholds.
 */
class Log : private Thread
{
public:
  using Thread::is_started;
  Log(const SubsystemMap *s);
  ~Log() override;
  void set_flush_on_exit();
  // switch the shared entry clock between coarse and fine timestamps
  void set_coarse_timestamps(bool coarse);
  void set_max_new(std::size_t n);
  void set_max_recent(std::size_t n);
  void set_log_file(std::string_view fn);
  void reopen_log_file();
  void chown_log_file(uid_t uid, gid_t gid);
  void set_log_stderr_prefix(std::string_view p);
  void set_stderr_fd(int fd);
  // block until all queued entries have been written
  void flush();
  // write the retained ring of recent entries (used on crash)
  void dump_recent();
  // per-sink thresholds: 'log' for normal output, 'crash' for dumps
  void set_syslog_level(int log, int crash);
  void set_stderr_level(int log, int crash);
  void set_graylog_level(int log, int crash);
  void start_graylog(const std::string& host,
                     const uuid_d& fsid);
  void stop_graylog();
  void set_journald_level(int log, int crash);
  void start_journald_logger();
  void stop_journald_logger();
  std::shared_ptr<Graylog> graylog() { return m_graylog; }
  void submit_entry(Entry&& e);
  void start();
  void stop();
  /// true if the log lock is held by our thread
  bool is_inside_log_lock();
  /// induce a segv on the next log event
  void inject_segv();
  void reset_segv();
protected:
  using EntryVector = std::vector<ConcreteEntry>;
  virtual void _flush(EntryVector& q, bool crash);
private:
  using EntryRing = boost::circular_buffer<ConcreteEntry>;
  static const std::size_t DEFAULT_MAX_NEW = 100;
  static const std::size_t DEFAULT_MAX_RECENT = 10000;
  Log **m_indirect_this;
  const SubsystemMap *m_subs;
  // two-lock design: producers take m_queue_mutex, the flusher takes
  // m_flush_mutex; holder ids support is_inside_log_lock()
  std::mutex m_queue_mutex;
  std::mutex m_flush_mutex;
  std::condition_variable m_cond_loggers;
  std::condition_variable m_cond_flusher;
  pthread_t m_queue_mutex_holder;
  pthread_t m_flush_mutex_holder;
  EntryVector m_new;    ///< new entries
  EntryRing m_recent;   ///< recent (less new) entries we've already written at low detail
  EntryVector m_flush;  ///< entries to be flushed (here to optimize heap allocations)
  std::string m_log_file;
  int m_fd = -1;
  uid_t m_uid = 0;
  gid_t m_gid = 0;
  int m_fd_stderr = STDERR_FILENO;
  int m_fd_last_error = 0;  ///< last error we say writing to fd (if any)
  int m_syslog_log = -2, m_syslog_crash = -2;
  int m_stderr_log = -1, m_stderr_crash = -1;
  int m_graylog_log = -3, m_graylog_crash = -3;
  int m_journald_log = -3, m_journald_crash = -3;
  std::string m_log_stderr_prefix;
  bool do_stderr_poll = false;
  std::shared_ptr<Graylog> m_graylog;
  std::unique_ptr<JournaldLogger> m_journald;
  std::vector<char> m_log_buf;   // reusable write buffer
  bool m_stop = false;           // signals the flusher thread to exit
  std::size_t m_max_new = DEFAULT_MAX_NEW;
  bool m_inject_segv = false;
  void *entry() override;        // flusher thread main loop
  void _log_safe_write(std::string_view sv);
  void _flush_logbuf();
  void _log_message(std::string_view s, bool crash);
  void _configure_stderr();
  void _log_stderr(std::string_view strv);
};
}
}
#endif
| 3,335 | 20.803922 | 88 | h |
null | ceph-main/src/log/LogClock.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LOG_CLOCK_H
#define CEPH_LOG_CLOCK_H
#include <cstdio>
#include <chrono>
#include <ctime>
#include <sys/time.h>
#include "include/ceph_assert.h"
#include "common/ceph_time.h"
#ifndef HAVE_SUSECONDS_T
typedef long suseconds_t;
#endif
namespace ceph {
namespace logging {
namespace _logclock {
// Because the underlying representations of a duration can be any
// arithmetic type we wish, slipping a coarseness tag there is the
// least hacky way to tag them. I'd also considered doing bit-stealing
// and just setting the low bit of the representation unconditionally
// to mark it as fine, BUT that would cut our nanosecond precision in
// half which sort of obviates the point of 'fine'…admittedly real
// computers probably don't care. More to the point it wouldn't be
// durable under arithmetic unless we wrote a whole class to support
// it /anyway/, and if I'm going to do that I may as well add a bool.
// (Yes I know we don't do arithmetic on log timestamps, but I don't
// want everything to suddenly break because someone did something
// that the std::chrono::timepoint contract actually supports.)
// Timestamp representation tagged with its clock coarseness; the tag
// rides along through arithmetic so downstream formatting can tell
// whether a stamp came from the coarse or fine clock.
struct taggedrep {
  uint64_t count;  // raw tick count (nanoseconds for log_clock)
  bool coarse;     // true if produced by the coarse clock
  explicit taggedrep(uint64_t count) : count(count), coarse(true) {}
  taggedrep(uint64_t count, bool coarse) : count(count), coarse(coarse) {}
  // const-qualified so the conversion also works on const values and
  // references (the original non-const overload rejected them)
  explicit operator uint64_t() const {
    return count;
  }
};
// Proper significant figure support would be a bit excessive. Also
// we'd have to know the precision of the clocks on Linux and FreeBSD
// and whatever else we want to support. Arithmetic simply "poisons"
// the result as coarse if either operand was coarse.
inline taggedrep operator +(const taggedrep& l, const taggedrep& r) {
  return { l.count + r.count, l.coarse || r.coarse };
}
inline taggedrep operator -(const taggedrep& l, const taggedrep& r) {
  return { l.count - r.count, l.coarse || r.coarse };
}
inline taggedrep operator *(const taggedrep& l, const taggedrep& r) {
  return { l.count * r.count, l.coarse || r.coarse };
}
inline taggedrep operator /(const taggedrep& l, const taggedrep& r) {
  return { l.count / r.count, l.coarse || r.coarse };
}
inline taggedrep operator %(const taggedrep& l, const taggedrep& r) {
  return { l.count % r.count, l.coarse || r.coarse };
}
// You can compare coarse and fine time. You shouldn't do so in any
// case where ordering actually MATTERS but in practice people won't
// actually ping-pong their logs back and forth between them.
// (Comparisons deliberately ignore the coarseness tag.)
inline bool operator ==(const taggedrep& l, const taggedrep& r) {
  return l.count == r.count;
}
inline bool operator !=(const taggedrep& l, const taggedrep& r) {
  return l.count != r.count;
}
inline bool operator <(const taggedrep& l, const taggedrep& r) {
  return l.count < r.count;
}
inline bool operator <=(const taggedrep& l, const taggedrep& r) {
  return l.count <= r.count;
}
inline bool operator >=(const taggedrep& l, const taggedrep& r) {
  return l.count >= r.count;
}
inline bool operator >(const taggedrep& l, const taggedrep& r) {
  return l.count > r.count;
}
}
// Clock used for log timestamps. Mostly satisfies the C++ Clock
// requirements, but its time source can be switched at runtime between
// coarse_real_clock and real_clock; each time_point records which one
// produced it via the taggedrep coarseness tag.
class log_clock {
public:
  using rep = _logclock::taggedrep;
  using period = std::nano;
  using duration = std::chrono::duration<rep, period>;
  // The second template parameter defaults to the clock's duration
  // type.
  using time_point = std::chrono::time_point<log_clock>;
  static constexpr const bool is_steady = false;
  time_point now() noexcept {
    return appropriate_now();
  }
  // use coarse_real_clock for subsequent now() calls
  void coarsen() {
    appropriate_now = coarse_now;
  }
  // use real_clock (full resolution) for subsequent now() calls
  void refine() {
    appropriate_now = fine_now;
  }
  // Since our formatting is done in microseconds and we're using it
  // anyway, we may as well keep this one
  static timeval to_timeval(time_point t) {
    auto rep = t.time_since_epoch().count();
    timespan ts(rep.count);
#ifndef _WIN32
    return { static_cast<time_t>(std::chrono::duration_cast<std::chrono::seconds>(ts).count()),
             static_cast<suseconds_t>(std::chrono::duration_cast<std::chrono::microseconds>(
               ts % std::chrono::seconds(1)).count()) };
#else
    // Windows defines timeval with long fields
    return { static_cast<long>(std::chrono::duration_cast<std::chrono::seconds>(ts).count()),
             static_cast<long>(std::chrono::duration_cast<std::chrono::microseconds>(
               ts % std::chrono::seconds(1)).count()) };
#endif
  }
private:
  static time_point coarse_now() {
    return time_point(
      duration(_logclock::taggedrep(coarse_real_clock::now()
                                    .time_since_epoch().count(), true)));
  }
  static time_point fine_now() {
    return time_point(
      duration(_logclock::taggedrep(real_clock::now()
                                    .time_since_epoch().count(), false)));
  }
  // current time source; defaults to the coarse clock
  time_point(*appropriate_now)() = coarse_now;
};
using log_time = log_clock::time_point;
// Format a log timestamp into 'out' as ISO-8601 local time with a
// numeric timezone offset, e.g. "2021-03-01T12:34:56.123456+0000".
// Coarse stamps are printed with millisecond precision, fine stamps
// with microseconds. Returns the number of characters snprintf would
// have written (excluding the NUL); asserts it is non-negative because
// callers add it to an offset without checking.
inline int append_time(const log_time& t, char *out, int outlen) {
  bool coarse = t.time_since_epoch().count().coarse;
  auto tv = log_clock::to_timeval(t);
  std::tm bdt;
  time_t t_sec = tv.tv_sec;
  localtime_r(&t_sec, &bdt);             // thread-safe local-time breakdown
  char tz[32] = { 0 };
  strftime(tz, sizeof(tz), "%z", &bdt);  // numeric UTC offset, e.g. "+0000"
  int r;
  if (coarse) {
    r = std::snprintf(out, outlen, "%04d-%02d-%02dT%02d:%02d:%02d.%03ld%s",
                      bdt.tm_year + 1900, bdt.tm_mon + 1, bdt.tm_mday,
                      bdt.tm_hour, bdt.tm_min, bdt.tm_sec,
                      static_cast<long>(tv.tv_usec / 1000), tz);
  } else {
    r = std::snprintf(out, outlen, "%04d-%02d-%02dT%02d:%02d:%02d.%06ld%s",
                      bdt.tm_year + 1900, bdt.tm_mon + 1, bdt.tm_mday,
                      bdt.tm_hour, bdt.tm_min, bdt.tm_sec,
                      static_cast<long>(tv.tv_usec), tz);
  }
  // Since our caller just adds the return value to something without
  // checking it…
  ceph_assert(r >= 0);
  return r;
}
}
}
#endif
| 5,688 | 32.662722 | 95 | h |
null | ceph-main/src/log/SubsystemMap.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LOG_SUBSYSTEMS
#define CEPH_LOG_SUBSYSTEMS
#include <string>
#include <vector>
#include <algorithm>
#include "common/likely.h"
#include "common/subsys_types.h"
#include "include/ceph_assert.h"
namespace ceph {
namespace logging {
// Per-subsystem debug/gather level table backing dout()/should_gather().
// The hot gather-level array is kept separate and compact; everything
// else lives in a slower-to-access vector.
class SubsystemMap {
  // Access to the current gathering levels must be *FAST* as they are
  // read over and over from all places in the code (via should_gather()
  // by i.e. dout).
  std::array<uint8_t, ceph_subsys_get_num()> m_gather_levels;

  // The rest. Should be as small as possible to not unnecessarily
  // enlarge md_config_t and spread its other elements across cache
  // lines. Access can be slow.
  std::vector<ceph_subsys_item_t> m_subsys;

  friend class Log;

public:
  SubsystemMap() {
    constexpr auto s = ceph_subsys_get_as_array();
    m_subsys.reserve(s.size());

    std::size_t i = 0;
    for (const ceph_subsys_item_t& item : s) {
      m_subsys.emplace_back(item);
      // A message must be gathered if either logging or gathering wants
      // it, so cache the max of the two levels.
      m_gather_levels[i++] = std::max(item.log_level, item.gather_level);
    }
  }

  constexpr static std::size_t get_num() {
    return ceph_subsys_get_num();
  }

  constexpr static std::size_t get_max_subsys_len() {
    return ceph_subsys_max_name_length();
  }

  // Out-of-range subsystem IDs fall back to subsystem 0.
  int get_log_level(unsigned subsys) const {
    if (subsys >= get_num())
      subsys = 0;
    return m_subsys[subsys].log_level;
  }

  int get_gather_level(unsigned subsys) const {
    if (subsys >= get_num())
      subsys = 0;
    return m_subsys[subsys].gather_level;
  }

  // TODO(rzarzynski): move to string_view?
  constexpr const char* get_name(unsigned subsys) const {
    if (subsys >= get_num())
      subsys = 0;
    return ceph_subsys_get_as_array()[subsys].name;
  }

  // Compile-time-checked fast path; SubV/LvlV are known at compile time.
  template <unsigned SubV, int LvlV>
  bool should_gather() const {
    static_assert(SubV < get_num(), "wrong subsystem ID");
    static_assert(LvlV >= -1 && LvlV <= 200);

    if constexpr (LvlV <= 0) {
      // handle the -1 and 0 levels entirely at compile-time.
      // Such debugs are intended to be gathered regardless even
      // of the user configuration.
      return true;
    } else {
      // we expect that setting level different than the default
      // is rather unusual.
      return expect(LvlV <= static_cast<int>(m_gather_levels[SubV]),
		    LvlV <= ceph_subsys_get_max_default_level(SubV));
    }
  }
  // Runtime-checked variant; asserts rather than clamping.
  bool should_gather(const unsigned sub, int level) const {
    ceph_assert(sub < m_subsys.size());
    return level <= static_cast<int>(m_gather_levels[sub]);
  }

  // Setters keep the cached m_gather_levels entry in sync (max of the
  // two per-subsystem levels).
  void set_log_level(unsigned subsys, uint8_t log)
  {
    ceph_assert(subsys < m_subsys.size());
    m_subsys[subsys].log_level = log;
    m_gather_levels[subsys] = \
      std::max(log, m_subsys[subsys].gather_level);
  }

  void set_gather_level(unsigned subsys, uint8_t gather)
  {
    ceph_assert(subsys < m_subsys.size());
    m_subsys[subsys].gather_level = gather;
    m_gather_levels[subsys] = \
      std::max(m_subsys[subsys].log_level, gather);
  }
};
}
}
#endif
| 3,065 | 25.894737 | 73 | h |
null | ceph-main/src/mds/Anchor.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_ANCHOR_H
#define CEPH_ANCHOR_H
#include <string>
#include "include/types.h"
#include "mdstypes.h"
#include "include/buffer.h"
/*
* Anchor represents primary linkage of an inode. When adding inode to an
* anchor table, MDS ensures that the table also contains inode's ancestor
* inodes. MDS can get inode's path by looking up anchor table recursively.
*/
// One primary-linkage record: inode `ino` lives in directory `dirino`
// under name `d_name`. Chains of these let the MDS rebuild a full path
// (see the file header comment).
class Anchor {
public:
  Anchor() {}
  Anchor(inodeno_t i, inodeno_t di, std::string_view str, __u8 tp) :
    ino(i), dirino(di), d_name(str), d_type(tp) {}

  void encode(bufferlist &bl) const;
  void decode(bufferlist::const_iterator &bl);
  void dump(Formatter *f) const;
  static void generate_test_instances(std::list<Anchor*>& ls);

  // Compares the durable fields; omap_idx (placement bookkeeping) is
  // intentionally not part of equality.
  bool operator==(const Anchor &r) const {
    return ino == r.ino && dirino == r.dirino &&
	   d_name == r.d_name && d_type == r.d_type &&
	   frags == r.frags;
  }

  inodeno_t ino; // anchored ino
  inodeno_t dirino; // ino of the directory containing the anchor
  std::string d_name; // dentry name under dirino
  __u8 d_type = 0; // dentry type -- presumably DT_*-style; confirm in encode()
  std::set<frag_t> frags;

  int omap_idx = -1; // stored in which omap object
};
WRITE_CLASS_ENCODER(Anchor)
// Anchor read back from the on-disk table; additionally carries a hint
// about which MDS rank was authoritative for the inode.
class RecoveredAnchor : public Anchor {
public:
  RecoveredAnchor() {}

  mds_rank_t auth = MDS_RANK_NONE; // auth hint
};
// Anchor for an inode currently open in cache; nref counts the anchored
// children that keep this entry alive.
class OpenedAnchor : public Anchor {
public:
  OpenedAnchor(inodeno_t i, inodeno_t di, std::string_view str, __u8 tp, int nr) :
    Anchor(i, di, str, tp),
    nref(nr)
  {}

  mutable int nref = 0; // how many children
};
std::ostream& operator<<(std::ostream& out, const Anchor &a);
#endif
| 1,904 | 24.743243 | 82 | h |
null | ceph-main/src/mds/BatchOp.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef MDS_BATCHOP_H
#define MDS_BATCHOP_H
#include "common/ref.h"
#include "mdstypes.h"
/**
 * Interface for batching several equivalent MDS requests behind one
 * "head" request so they can all be forwarded to another rank or
 * answered with a single result.
 */
class BatchOp {
public:
  virtual ~BatchOp() {}

  // Attach another queued request to this batch.
  virtual void add_request(const ceph::ref_t<class MDRequestImpl>& mdr) = 0;
  // Choose a replacement head request when the current head goes away.
  virtual ceph::ref_t<class MDRequestImpl> find_new_head() = 0;

  virtual void print(std::ostream&) = 0;

  // Forward the whole batch to rank `target`.
  void forward(mds_rank_t target);
  // Complete the whole batch with result code `r`.
  void respond(int r);

protected:
  virtual void _forward(mds_rank_t) = 0;
  // Takes the result code passed through respond(int r); it is an error
  // code, not a rank, so the parameter type is int (was mds_rank_t).
  virtual void _respond(int) = 0;
};
#endif
| 889 | 20.707317 | 76 | h |
null | ceph-main/src/mds/Beacon.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef BEACON_STATE_H
#define BEACON_STATE_H
#include <mutex>
#include <string_view>
#include <thread>
#include "include/types.h"
#include "include/Context.h"
#include "msg/Dispatcher.h"
#include "messages/MMDSBeacon.h"
class MonClient;
class MDSRank;
/**
* One of these per MDS. Handle beacon logic in this separate class so
* that a busy MDS holding its own lock does not hold up sending beacon
* messages to the mon and cause false lagginess.
*
* So that we can continue to operate while the MDS is holding its own lock,
* we keep copies of the data needed to generate beacon messages. The MDS is
* responsible for calling Beacon::notify_* when things change.
*/
class Beacon : public Dispatcher
{
public:
  using clock = ceph::coarse_mono_clock;
  using time = ceph::coarse_mono_time;

  // Debug-dump triggers, flipped when the corresponding condition is hit.
  bool missed_beacon_ack_dump = false;
  bool missed_internal_heartbeat_dump = false;

  Beacon(CephContext *cct, MonClient *monc, std::string_view name);
  ~Beacon() override;

  void init(const MDSMap &mdsmap);
  void shutdown();

  // Dispatcher interface: beacons are handled on the fast-dispatch path.
  bool ms_can_fast_dispatch_any() const override { return true; }
  bool ms_can_fast_dispatch2(const cref_t<Message>& m) const override;
  void ms_fast_dispatch2(const ref_t<Message>& m) override;
  bool ms_dispatch2(const ref_t<Message> &m) override;
  void ms_handle_connect(Connection *c) override {}
  bool ms_handle_reset(Connection *c) override {return false;}
  void ms_handle_remote_reset(Connection *c) override {}
  bool ms_handle_refused(Connection *c) override {return false;}

  // Called by the MDS to refresh our cached copies of its state.
  void notify_mdsmap(const MDSMap &mdsmap);
  void notify_health(const MDSRank *mds);

  void handle_mds_beacon(const cref_t<MMDSBeacon> &m);
  void send();

  void set_want_state(const MDSMap &mdsmap, MDSMap::DaemonState newstate);
  MDSMap::DaemonState get_want_state() const;

  /**
   * Send a beacon, and block until the ack is received from the mon
   * or `duration` seconds pass, whichever happens sooner. Useful
   * for emitting a last message on shutdown.
   */
  void send_and_wait(const double duration);

  bool is_laggy();
  // Seconds elapsed since the last time the laggy flag was cleared.
  double last_cleared_laggy() const {
    std::unique_lock lock(mutex);
    return std::chrono::duration<double>(clock::now()-last_laggy).count();
  }

private:
  void _notify_mdsmap(const MDSMap &mdsmap);
  bool _send();

  // Guards all state below; also used by the sender thread.
  mutable std::mutex mutex;
  std::thread sender;
  std::condition_variable cvar;
  time last_send = clock::zero();
  double beacon_interval = 5.0;
  bool finished = false;
  MonClient* monc;

  // Items we duplicate from the MDS to have access under our own lock
  std::string name;
  version_t epoch = 0;
  CompatSet compat;
  MDSMap::DaemonState want_state = MDSMap::STATE_BOOT;

  // Internal beacon state
  version_t last_seq = 0; // last seq sent to monitor
  std::map<version_t,time> seq_stamp; // seq # -> time sent
  time last_acked_stamp = clock::zero(); // last time we sent a beacon that got acked
  bool laggy = false;
  time last_laggy = clock::zero();

  // Health status to be copied into each beacon message
  MDSHealth health;
};
#endif // BEACON_STATE_H
| 3,476 | 28.717949 | 86 | h |
null | ceph-main/src/mds/CDentry.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CDENTRY_H
#define CEPH_CDENTRY_H
#include <string>
#include <string_view>
#include <set>
#include "include/counter.h"
#include "include/types.h"
#include "include/buffer_fwd.h"
#include "include/lru.h"
#include "include/elist.h"
#include "include/filepath.h"
#include "BatchOp.h"
#include "MDSCacheObject.h"
#include "MDSContext.h"
#include "Mutation.h"
#include "SimpleLock.h"
#include "LocalLockC.h"
#include "ScrubHeader.h"
class CInode;
class CDir;
class Locker;
class CDentry;
class LogSegment;
class Session;
// define an ordering
bool operator<(const CDentry& l, const CDentry& r);
// dentry
// In-memory dentry: a (name, snapid-range) entry in a dirfrag mapping to
// an inode (primary), a remote inode number (remote), or nothing (null).
// Participates in the MDS cache-object pin/lock/replica machinery.
class CDentry : public MDSCacheObject, public LRUObject, public Counter<CDentry> {
public:
  MEMPOOL_CLASS_HELPERS();
  friend class CDir;

  // The dentry's linkage: exactly one of primary / remote / null.
  struct linkage_t {
    CInode *inode = nullptr;
    inodeno_t remote_ino = 0;
    unsigned char remote_d_type = 0;

    linkage_t() {}

    // dentry type is primary || remote || null
    // inode ptr is required for primary, optional for remote, undefined for null
    bool is_primary() const { return remote_ino == 0 && inode != 0; }
    bool is_remote() const { return remote_ino > 0; }
    bool is_null() const { return remote_ino == 0 && inode == 0; }

    CInode *get_inode() { return inode; }
    const CInode *get_inode() const { return inode; }
    inodeno_t get_remote_ino() const { return remote_ino; }
    unsigned char get_remote_d_type() const { return remote_d_type; }
    std::string get_remote_d_type_string() const;

    void set_remote(inodeno_t ino, unsigned char d_type) {
      remote_ino = ino;
      remote_d_type = d_type;
      inode = 0;
    }
  };

  // -- state --
  static const int STATE_NEW = (1<<0);
  static const int STATE_FRAGMENTING = (1<<1);
  static const int STATE_PURGING = (1<<2);
  static const int STATE_BADREMOTEINO = (1<<3);
  static const int STATE_EVALUATINGSTRAY = (1<<4);
  static const int STATE_PURGINGPINNED = (1<<5);
  static const int STATE_BOTTOMLRU = (1<<6);
  static const int STATE_UNLINKING = (1<<7);
  static const int STATE_REINTEGRATING = (1<<8);
  // stray dentry needs notification of releasing reference
  static const int STATE_STRAY = STATE_NOTIFYREF;
  // state bits preserved across export/import (see decode_import())
  static const int MASK_STATE_IMPORT_KEPT = STATE_BOTTOMLRU;

  // -- pins --
  static const int PIN_INODEPIN = 1; // linked inode is pinned
  static const int PIN_FRAGMENTING = -2; // containing dir is refragmenting
  static const int PIN_PURGING = 3;
  static const int PIN_SCRUBPARENT = 4;
  static const int PIN_WAITUNLINKSTATE = 5;

  static const unsigned EXPORT_NONCE = 1;

  // waiter tags (see add_waiter())
  const static uint64_t WAIT_UNLINK_STATE = (1<<0);
  const static uint64_t WAIT_UNLINK_FINISH = (1<<1);
  const static uint64_t WAIT_REINTEGRATE_FINISH = (1<<2);

  uint32_t replica_unlinking_ref = 0;

  // Null-dentry constructor.
  CDentry(std::string_view n, __u32 h,
	  mempool::mds_co::string alternate_name,
	  snapid_t f, snapid_t l) :
    hash(h),
    first(f), last(l),
    item_dirty(this),
    lock(this, &lock_type),
    versionlock(this, &versionlock_type),
    name(n),
    alternate_name(std::move(alternate_name))
  {}
  // Remote-dentry constructor: links to inode number `ino` of type `dt`.
  CDentry(std::string_view n, __u32 h,
	  mempool::mds_co::string alternate_name,
	  inodeno_t ino, unsigned char dt,
	  snapid_t f, snapid_t l) :
    hash(h),
    first(f), last(l),
    item_dirty(this),
    lock(this, &lock_type),
    versionlock(this, &versionlock_type),
    name(n),
    alternate_name(std::move(alternate_name))
  {
    linkage.remote_ino = ino;
    linkage.remote_d_type = dt;
  }

  ~CDentry() override {
    ceph_assert(batch_ops.empty());
  }

  std::string_view pin_name(int p) const override {
    switch (p) {
    case PIN_INODEPIN: return "inodepin";
    case PIN_FRAGMENTING: return "fragmenting";
    case PIN_PURGING: return "purging";
    case PIN_SCRUBPARENT: return "scrubparent";
    case PIN_WAITUNLINKSTATE: return "waitunlinkstate";
    default: return generic_pin_name(p);
    }
  }

  // -- wait --
  //static const int WAIT_LOCK_OFFSET = 8;

  void add_waiter(uint64_t tag, MDSContext *c) override;

  bool is_lt(const MDSCacheObject *r) const override {
    return *this < *static_cast<const CDentry*>(r);
  }

  dentry_key_t key() {
    return dentry_key_t(last, name.c_str(), hash);
  }

  bool check_corruption(bool load);

  const CDir *get_dir() const { return dir; }
  CDir *get_dir() { return dir; }
  std::string_view get_name() const { return std::string_view(name); }
  std::string_view get_alternate_name() const {
    return std::string_view(alternate_name);
  }
  void set_alternate_name(mempool::mds_co::string altn) {
    alternate_name = std::move(altn);
  }
  void set_alternate_name(std::string_view altn) {
    alternate_name = mempool::mds_co::string(altn);
  }

  __u32 get_hash() const { return hash; }

  // linkage
  const linkage_t *get_linkage() const { return &linkage; }
  linkage_t *get_linkage() { return &linkage; }

  // Start a projected (uncommitted) linkage change; popped on commit.
  linkage_t *_project_linkage() {
    projected.push_back(linkage_t());
    return &projected.back();
  }
  void push_projected_linkage();
  void push_projected_linkage(inodeno_t ino, char d_type) {
    linkage_t *p = _project_linkage();
    p->remote_ino = ino;
    p->remote_d_type = d_type;
  }
  void push_projected_linkage(CInode *inode);
  linkage_t *pop_projected_linkage();

  bool is_projected() const { return !projected.empty(); }

  linkage_t *get_projected_linkage() {
    if (!projected.empty())
      return &projected.back();
    return &linkage;
  }

  const linkage_t *get_projected_linkage() const {
    if (!projected.empty())
      return &projected.back();
    return &linkage;
  }

  CInode *get_projected_inode() {
    return get_projected_linkage()->inode;
  }

  // Whether this client/mutation should see projected state: either the
  // lock lets it read projected data or the mutation holds the xlock.
  bool use_projected(client_t client, const MutationRef& mut) const {
    return lock.can_read_projected(client) ||
	   lock.get_xlock_by() == mut;
  }
  linkage_t *get_linkage(client_t client, const MutationRef& mut) {
    return use_projected(client, mut) ? get_projected_linkage() : get_linkage();
  }

  // ref counts: pin ourselves in the LRU when we're pinned.
  void first_get() override {
    lru_pin();
  }
  void last_put() override {
    lru_unpin();
  }
  void _put() override;

  // auth pins
  bool can_auth_pin(int *err_ret=nullptr) const override;
  void auth_pin(void *by) override;
  void auth_unpin(void *by) override;
  void adjust_nested_auth_pins(int diradj, void *by);
  bool is_frozen() const override;
  bool is_freezing() const override;
  int get_num_dir_auth_pins() const;

  // remote links
  void link_remote(linkage_t *dnl, CInode *in);
  void unlink_remote(linkage_t *dnl);

  // copy cons
  CDentry(const CDentry& m);
  const CDentry& operator= (const CDentry& right);

  // misc
  void make_path_string(std::string& s, bool projected=false) const;
  void make_path(filepath& fp, bool projected=false) const;

  // -- version --
  version_t get_version() const { return version; }
  void set_version(version_t v) { projected_version = version = v; }
  version_t get_projected_version() const { return projected_version; }
  void set_projected_version(version_t v) { projected_version = v; }

  mds_authority_t authority() const override;

  version_t pre_dirty(version_t min=0);
  void _mark_dirty(LogSegment *ls);
  void mark_dirty(version_t pv, LogSegment *ls);
  void mark_clean();

  void mark_new();
  bool is_new() const { return state_test(STATE_NEW); }
  void clear_new() { state_clear(STATE_NEW); }

  void mark_auth();
  void clear_auth();

  bool scrub(snapid_t next_seq);

  // -- exporting
  // note: this assumes the dentry already exists.
  // i.e., the name is already extracted... so we just need the other state.
  // Pairs with decode_import(); field order must match exactly.
  void encode_export(ceph::buffer::list& bl) {
    ENCODE_START(1, 1, bl);
    encode(first, bl);
    encode(state, bl);
    encode(version, bl);
    encode(projected_version, bl);
    encode(lock, bl);
    encode(get_replicas(), bl);
    get(PIN_TEMPEXPORTING);
    ENCODE_FINISH(bl);
  }
  void finish_export() {
    // twiddle
    clear_replica_map();
    replica_nonce = EXPORT_NONCE;
    clear_auth();
    if (is_dirty())
      mark_clean();
    put(PIN_TEMPEXPORTING);
  }
  void abort_export() {
    put(PIN_TEMPEXPORTING);
  }
  void decode_import(ceph::buffer::list::const_iterator& blp, LogSegment *ls) {
    DECODE_START(1, blp);
    decode(first, blp);
    __u32 nstate;
    decode(nstate, blp);
    decode(version, blp);
    decode(projected_version, blp);
    decode(lock, blp);
    decode(get_replicas(), blp);
    // twiddle: keep only the import-safe local state bits, become auth,
    // and re-apply dirtiness from the exported state.
    state &= MASK_STATE_IMPORT_KEPT;
    mark_auth();
    if (nstate & STATE_DIRTY)
      _mark_dirty(ls);
    if (is_replicated())
      get(PIN_REPLICATED);
    replica_nonce = 0;
    DECODE_FINISH(blp);
  }

  // -- locking --
  SimpleLock* get_lock(int type) override {
    ceph_assert(type == CEPH_LOCK_DN);
    return &lock;
  }
  void set_object_info(MDSCacheObjectInfo &info) override;
  void encode_lock_state(int type, ceph::buffer::list& bl) override;
  void decode_lock_state(int type, const ceph::buffer::list& bl) override;

  // ---------------------------------------------
  // replicas (on clients)
  bool is_any_leases() const {
    return !client_lease_map.empty();
  }
  const ClientLease *get_client_lease(client_t c) const {
    if (client_lease_map.count(c))
      return client_lease_map.find(c)->second;
    return 0;
  }
  ClientLease *get_client_lease(client_t c) {
    if (client_lease_map.count(c))
      return client_lease_map.find(c)->second;
    return 0;
  }
  bool have_client_lease(client_t c) const {
    const ClientLease *l = get_client_lease(c);
    if (l)
      return true;
    else
      return false;
  }
  ClientLease *add_client_lease(client_t c, Session *session);
  void remove_client_lease(ClientLease *r, Locker *locker); // returns remaining mask (if any), and kicks locker eval_gathers
  void remove_client_leases(Locker *locker);

  std::ostream& print_db_line_prefix(std::ostream& out) override;
  void print(std::ostream& out) override;
  void dump(ceph::Formatter *f) const;

  static void encode_remote(inodeno_t& ino, unsigned char d_type,
			    std::string_view alternate_name,
			    bufferlist &bl);
  static void decode_remote(char icode, inodeno_t& ino, unsigned char& d_type,
			    mempool::mds_co::string& alternate_name,
			    ceph::buffer::list::const_iterator& bl);

  __u32 hash;
  snapid_t first, last;
  // NOTE(review): appears to record whether the [first,last] range was
  // already checked for corruption at load time; see check_corruption().
  bool corrupt_first_loaded = false;

  elist<CDentry*>::item item_dirty, item_dir_dirty;
  elist<CDentry*>::item item_stray;

  // lock
  static LockType lock_type;
  static LockType versionlock_type;

  SimpleLock lock; // FIXME referenced containers not in mempool
  LocalLockC versionlock; // FIXME referenced containers not in mempool

  mempool::mds_co::map<client_t,ClientLease*> client_lease_map;
  std::map<int, std::unique_ptr<BatchOp>> batch_ops;

protected:
  friend class Migrator;
  friend class Locker;
  friend class MDCache;
  friend class StrayManager;
  friend class CInode;
  friend class C_MDC_XlockRequest;

  CDir *dir = nullptr; // containing dirfrag
  linkage_t linkage; /* durable */
  mempool::mds_co::list<linkage_t> projected; // pending linkage changes

  version_t version = 0; // dir version when last touched.
  version_t projected_version = 0; // what it will be when i unlock/commit.

private:
  mempool::mds_co::string name;
  mempool::mds_co::string alternate_name;
};
std::ostream& operator<<(std::ostream& out, const CDentry& dn);
#endif
| 12,047 | 28.171913 | 126 | h |
null | ceph-main/src/mds/Capability.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CAPABILITY_H
#define CEPH_CAPABILITY_H
#include "include/buffer_fwd.h"
#include "include/counter.h"
#include "include/mempool.h"
#include "include/xlist.h"
#include "include/elist.h"
#include "common/config.h"
#include "mdstypes.h"
/*
Capability protocol notes.
- two types of cap events from mds -> client:
- cap "issue" in a MClientReply, or an MClientCaps IMPORT op.
- cap "update" (revocation or grant) .. an MClientCaps message.
- if client has cap, the mds should have it too.
- if client has no dirty data, it can release it without waiting for an mds ack.
- client may thus get a cap _update_ and not have the cap. ignore it.
- mds should track seq of last issue. any release
attempt will only succeed if the client has seen the latest.
- a UPDATE updates the clients issued caps, wanted, etc. it may also flush dirty metadata.
- 'caps' are which caps the client retains.
- if 0, client wishes to release the cap
- 'wanted' is which caps the client wants.
- 'dirty' is which metadata is to be written.
- client gets a FLUSH_ACK with matching dirty flags indicating which caps were written.
- a FLUSH_ACK acks a FLUSH.
- 'dirty' is the _original_ FLUSH's dirty (i.e., which metadata was written back)
- 'seq' is the _original_ FLUSH's seq.
- 'caps' is the _original_ FLUSH's caps (not actually important)
- client can conclude that (dirty & ~caps) bits were successfully cleaned.
- a FLUSHSNAP flushes snapshot metadata.
- 'dirty' indicates which caps, were dirty, if any.
- mds writes metadata. if dirty!=0, replies with FLUSHSNAP_ACK.
*/
class CInode;
class Session;
class MDLockCache;
namespace ceph {
class Formatter;
}
// One client's capability on one inode. Tracks what the client currently
// holds (_pending), what is still outstanding across revocations
// (_issued), and what it wants (_wanted). See the protocol notes above.
class Capability : public Counter<Capability> {
public:
  MEMPOOL_CLASS_HELPERS();

  // Serialized form of a cap when migrating it to another MDS.
  struct Export {
    Export() {}
    Export(int64_t id, int w, int i, int p, snapid_t cf,
	   ceph_seq_t s, ceph_seq_t m, utime_t lis, unsigned st) :
      cap_id(id), wanted(w), issued(i), pending(p), client_follows(cf),
      seq(s), mseq(m), last_issue_stamp(lis), state(st) {}
    void encode(ceph::buffer::list &bl) const;
    void decode(ceph::buffer::list::const_iterator &p);
    void dump(ceph::Formatter *f) const;
    static void generate_test_instances(std::list<Export*>& ls);

    int64_t cap_id = 0;
    int32_t wanted = 0;
    int32_t issued = 0;
    int32_t pending = 0;
    snapid_t client_follows;
    ceph_seq_t seq = 0;
    ceph_seq_t mseq = 0;
    utime_t last_issue_stamp;
    uint32_t state = 0;
  };
  // Minimal identifying info sent back when importing a cap.
  struct Import {
    Import() {}
    Import(int64_t i, ceph_seq_t s, ceph_seq_t m) : cap_id(i), issue_seq(s), mseq(m) {}
    void encode(ceph::buffer::list &bl) const;
    void decode(ceph::buffer::list::const_iterator &p);
    void dump(ceph::Formatter *f) const;

    int64_t cap_id = 0;
    ceph_seq_t issue_seq = 0;
    ceph_seq_t mseq = 0;
  };
  // Snapshot of the cap bits held before a revocation was started.
  struct revoke_info {
    revoke_info() {}
    revoke_info(__u32 b, ceph_seq_t s, ceph_seq_t li) : before(b), seq(s), last_issue(li) {}
    void encode(ceph::buffer::list& bl) const;
    void decode(ceph::buffer::list::const_iterator& bl);
    void dump(ceph::Formatter *f) const;
    static void generate_test_instances(std::list<revoke_info*>& ls);

    __u32 before = 0;
    ceph_seq_t seq = 0;
    ceph_seq_t last_issue = 0;
  };

  const static unsigned STATE_NOTABLE = (1<<0);
  const static unsigned STATE_NEW = (1<<1);
  const static unsigned STATE_IMPORTING = (1<<2);
  const static unsigned STATE_NEEDSNAPFLUSH = (1<<3);
  const static unsigned STATE_CLIENTWRITEABLE = (1<<4);
  const static unsigned STATE_NOINLINE = (1<<5);
  const static unsigned STATE_NOPOOLNS = (1<<6);
  const static unsigned STATE_NOQUOTA = (1<<7);

  // state bits that travel with the cap on export (see merge())
  const static unsigned MASK_STATE_EXPORTED =
    (STATE_CLIENTWRITEABLE | STATE_NOINLINE | STATE_NOPOOLNS | STATE_NOQUOTA);

  Capability(CInode *i=nullptr, Session *s=nullptr, uint64_t id=0);
  Capability(const Capability& other) = delete;

  const Capability& operator=(const Capability& other) = delete;

  // caps the client currently holds
  int pending() const {
    return _pending;
  }
  // caps possibly still in the client's hands (includes unacked revokes)
  int issued() const {
    return _issued;
  }
  // caps we are waiting for the client to give back
  int revoking() const {
    return _issued & ~_pending;
  }
  // Issue cap set `c`, recording a revoke_info entry if any currently
  // pending bit is being taken away. Returns the new sequence number.
  ceph_seq_t issue(unsigned c, bool reval=false) {
    if (reval)
      revalidate();

    if (_pending & ~c) {
      // revoking (and maybe adding) bits. note caps prior to this revocation
      _revokes.emplace_back(_pending, last_sent, last_issue);
      _pending = c;
      _issued |= c;
      if (!is_notable())
	mark_notable();
    } else if (~_pending & c) {
      // adding bits only. remove obsolete revocations?
      _pending |= c;
      _issued |= c;
      // drop old _revokes with no bits we don't have
      while (!_revokes.empty() &&
	     (_revokes.back().before & ~_pending) == 0)
	_revokes.pop_back();
    } else {
      // no change.
      ceph_assert(_pending == c);
    }
    //last_issue =
    inc_last_seq();
    return last_sent;
  }
  // Add bits only; never starts a revocation.
  ceph_seq_t issue_norevoke(unsigned c, bool reval=false) {
    if (reval)
      revalidate();

    _pending |= c;
    _issued |= c;
    clear_new();

    inc_last_seq();
    return last_sent;
  }
  int confirm_receipt(ceph_seq_t seq, unsigned caps);
  // we may get a release racing with revocations, which means our revokes will be ignored
  // by the client. clean them out of our _revokes history so we don't wait on them.
  void clean_revoke_from(ceph_seq_t li) {
    bool changed = false;
    while (!_revokes.empty() && _revokes.front().last_issue <= li) {
      _revokes.pop_front();
      changed = true;
    }
    if (changed) {
      bool was_revoking = (_issued & ~_pending);
      calc_issued();
      if (was_revoking && _issued == _pending) {
	// nothing left to revoke; drop off the revoking lists
	item_revoking_caps.remove_myself();
	item_client_revoking_caps.remove_myself();
	maybe_clear_notable();
      }
    }
  }
  ceph_seq_t get_mseq() const { return mseq; }
  void inc_mseq() { mseq++; }

  utime_t get_last_issue_stamp() const { return last_issue_stamp; }
  utime_t get_last_revoke_stamp() const { return last_revoke_stamp; }

  void set_last_issue() { last_issue = last_sent; }
  void set_last_issue_stamp(utime_t t) { last_issue_stamp = t; }
  void set_last_revoke_stamp(utime_t t) { last_revoke_stamp = t; }
  void reset_num_revoke_warnings() { num_revoke_warnings = 0; }
  void inc_num_revoke_warnings() { ++num_revoke_warnings; }
  unsigned get_num_revoke_warnings() const { return num_revoke_warnings; }

  void set_cap_id(uint64_t i) { cap_id = i; }
  uint64_t get_cap_id() const { return cap_id; }

  //ceph_seq_t get_last_issue() { return last_issue; }

  bool is_suppress() const { return suppress > 0; }
  void inc_suppress() { suppress++; }
  void dec_suppress() { suppress--; }

  // "notable" caps must be kept in the session's cap list; wanting any
  // write or file read/write caps qualifies.
  static bool is_wanted_notable(int wanted) {
    return wanted & (CEPH_CAP_ANY_WR|CEPH_CAP_FILE_WR|CEPH_CAP_FILE_RD);
  }
  bool is_wanted_notable() const {
    return is_wanted_notable(wanted());
  }
  bool is_notable() const { return state & STATE_NOTABLE; }

  bool is_stale() const;
  bool is_valid() const;
  bool is_new() const { return state & STATE_NEW; }
  void mark_new() { state |= STATE_NEW; }
  void clear_new() { state &= ~STATE_NEW; }
  bool is_importing() const { return state & STATE_IMPORTING; }
  void mark_importing() { state |= STATE_IMPORTING; }
  void clear_importing() { state &= ~STATE_IMPORTING; }
  bool need_snapflush() const { return state & STATE_NEEDSNAPFLUSH; }
  void mark_needsnapflush() { state |= STATE_NEEDSNAPFLUSH; }
  void clear_needsnapflush() { state &= ~STATE_NEEDSNAPFLUSH; }

  bool is_clientwriteable() const { return state & STATE_CLIENTWRITEABLE; }
  void mark_clientwriteable() {
    if (!is_clientwriteable()) {
      state |= STATE_CLIENTWRITEABLE;
      if (!is_notable())
	mark_notable();
    }
  }
  void clear_clientwriteable() {
    if (is_clientwriteable()) {
      state &= ~STATE_CLIENTWRITEABLE;
      maybe_clear_notable();
    }
  }

  bool is_noinline() const { return state & STATE_NOINLINE; }
  bool is_nopoolns() const { return state & STATE_NOPOOLNS; }
  bool is_noquota() const { return state & STATE_NOQUOTA; }

  CInode *get_inode() const { return inode; }
  Session *get_session() const { return session; }
  client_t get_client() const;

  // caps this client wants to hold
  int wanted() const { return _wanted; }
  void set_wanted(int w);

  void inc_last_seq() { last_sent++; }
  ceph_seq_t get_last_seq() const {
    return last_sent;
  }
  ceph_seq_t get_last_issue() const { return last_issue; }

  void reset_seq() {
    last_sent = 0;
    last_issue = 0;
  }

  // -- exports --
  // Note: mseq is bumped by one in the exported copy.
  Export make_export() const {
    return Export(cap_id, wanted(), issued(), pending(), client_follows, get_last_seq(), mseq+1, last_issue_stamp, state);
  }
  // Fold an imported cap into this one.
  void merge(const Export& other, bool auth_cap) {
    // issued + pending
    int newpending = other.pending | pending();
    if (other.issued & ~newpending)
      issue(other.issued | newpending);
    else
      issue(newpending);
    last_issue_stamp = other.last_issue_stamp;

    client_follows = other.client_follows;

    state |= other.state & MASK_STATE_EXPORTED;
    if ((other.state & STATE_CLIENTWRITEABLE) && !is_notable())
      mark_notable();

    // wanted
    set_wanted(wanted() | other.wanted);
    if (auth_cap)
      mseq = other.mseq;
  }
  void merge(int otherwanted, int otherissued) {
    // issued + pending
    int newpending = pending();
    if (otherissued & ~newpending)
      issue(otherissued | newpending);
    else
      issue(newpending);

    // wanted
    set_wanted(wanted() | otherwanted);
  }

  // Treat everything outstanding as acknowledged up to last_sent.
  int revoke() {
    if (revoking())
      return confirm_receipt(last_sent, pending());
    return 0;
  }

  // serializers
  void encode(ceph::buffer::list &bl) const;
  void decode(ceph::buffer::list::const_iterator &bl);
  void dump(ceph::Formatter *f) const;
  static void generate_test_instances(std::list<Capability*>& ls);

  snapid_t client_follows = 0;
  version_t client_xattr_version = 0;
  version_t client_inline_version = 0;
  int64_t last_rbytes = 0;
  int64_t last_rsize = 0;

  xlist<Capability*>::item item_session_caps;
  xlist<Capability*>::item item_snaprealm_caps;
  xlist<Capability*>::item item_revoking_caps;
  xlist<Capability*>::item item_client_revoking_caps;

  elist<MDLockCache*> lock_caches;
  int get_lock_cache_allowed() const { return lock_cache_allowed; }
  void set_lock_cache_allowed(int c) { lock_cache_allowed |= c; }
  void clear_lock_cache_allowed(int c) { lock_cache_allowed &= ~c; }

private:
  // Recompute _issued as _pending plus all not-yet-acked revoked bits.
  void calc_issued() {
    _issued = _pending;
    for (const auto &r : _revokes) {
      _issued |= r.before;
    }
  }

  void revalidate();

  void mark_notable();
  void maybe_clear_notable();

  CInode *inode;
  Session *session;

  // NOTE(review): cap_id/cap_gen have no in-class initializers here;
  // presumably set by the constructor in the .cc -- confirm.
  uint64_t cap_id;
  uint32_t cap_gen;

  __u32 _wanted = 0; // what the client wants (ideally)

  utime_t last_issue_stamp;
  utime_t last_revoke_stamp;
  unsigned num_revoke_warnings = 0;

  // track in-flight caps --------------
  // - add new caps to _pending
  // - track revocations in _revokes list
  __u32 _pending = 0, _issued = 0;
  mempool::mds_co::list<revoke_info> _revokes;

  ceph_seq_t last_sent = 0;
  ceph_seq_t last_issue = 0;
  ceph_seq_t mseq = 0;

  int suppress = 0;
  unsigned state = 0;

  int lock_cache_allowed = 0;
};
WRITE_CLASS_ENCODER(Capability::Export)
WRITE_CLASS_ENCODER(Capability::Import)
WRITE_CLASS_ENCODER(Capability::revoke_info)
WRITE_CLASS_ENCODER(Capability)
#endif
| 11,877 | 28.919395 | 122 | h |
null | ceph-main/src/mds/DamageTable.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef DAMAGE_TABLE_H_
#define DAMAGE_TABLE_H_
#include <string_view>
#include "mdstypes.h"
#include "include/random.h"
class CDir;
typedef uint64_t damage_entry_id_t;
typedef enum
{
DAMAGE_ENTRY_DIRFRAG,
DAMAGE_ENTRY_DENTRY,
DAMAGE_ENTRY_BACKTRACE
} damage_entry_type_t;
// Base record for one piece of detected metadata damage; concrete
// subclasses correspond to the damage_entry_type_t variants.
class DamageEntry
{
  public:
  DamageEntry()
  {
    // Random 32-bit ID lets individual entries be addressed later;
    // reported_at records when the damage was noticed.
    id = ceph::util::generate_random_number<damage_entry_id_t>(0, 0xffffffff);
    reported_at = ceph_clock_now();
  }

  virtual ~DamageEntry();

  virtual damage_entry_type_t get_type() const = 0;
  virtual void dump(Formatter *f) const = 0;

  damage_entry_id_t id;
  utime_t reported_at;

  // path is optional, advisory. Used to give the admin an idea of what
  // part of his tree the damage affects.
  std::string path;
};
typedef std::shared_ptr<DamageEntry> DamageEntryRef;
class DirFragIdent
{
  public:
  DirFragIdent(inodeno_t ino_, frag_t frag_)
    : ino(ino_), frag(frag_)
  {}

  // Order by inode number first; fragments break ties within an inode.
  bool operator<(const DirFragIdent &rhs) const
  {
    if (ino != rhs.ino) {
      return ino < rhs.ino;
    }
    return frag < rhs.frag;
  }

  inodeno_t ino;
  frag_t frag;
};
class DentryIdent
{
  public:
  DentryIdent(std::string_view dname_, snapid_t snap_id_)
    : dname(dname_), snap_id(snap_id_)
  {}

  // Order by dentry name first; snapshot ids break ties for equal names.
  bool operator<(const DentryIdent &rhs) const
  {
    if (dname != rhs.dname) {
      return dname < rhs.dname;
    }
    return snap_id < rhs.snap_id;
  }

  std::string dname;
  snapid_t snap_id;
};
/**
* Registry of in-RADOS metadata damage identified
* during forward scrub or during normal fetches.
*
* Used to indicate damage to the administrator, and
* to cache known-bad paths so that we don't hit them
* repeatedly.
*
* Callers notifying damage must check return code; if
* an fatal condition is indicated then they should mark the MDS
* rank damaged.
*
* An artificial limit on the number of damage entries
* is imposed to avoid this structure growing indefinitely. If
* a notification causes the limit to be exceeded, the fatal
* condition will be indicated in the return code and the MDS
* rank should be marked damaged.
*
* Protected by MDS::mds_lock
*/
class DamageTable
{
  public:
  // A rank is required: damage is tracked per-MDS-rank so that mydir
  // ownership can be checked (see `rank` member below).
  explicit DamageTable(const mds_rank_t rank_)
    : rank(rank_)
  {
    ceph_assert(rank_ != MDS_RANK_NONE);
  }

  /**
   * Return true if no damage entries exist
   */
  bool empty() const
  {
    return by_id.empty();
  }

  /**
   * Indicate that a dirfrag cannot be loaded.
   *
   * @return true if fatal
   */
  bool notify_dirfrag(inodeno_t ino, frag_t frag, std::string_view path);

  /**
   * Indicate that a particular dentry cannot be loaded.
   *
   * @return true if fatal
   */
  bool notify_dentry(
    inodeno_t ino, frag_t frag,
    snapid_t snap_id, std::string_view dname, std::string_view path);

  /**
   * Indicate that a particular Inode could not be loaded by number
   */
  bool notify_remote_damaged(inodeno_t ino, std::string_view path);

  // Query helpers: used by load paths to avoid re-hitting known-bad metadata.
  bool is_dentry_damaged(
      const CDir *dir_frag,
      std::string_view dname,
      const snapid_t snap_id) const;

  bool is_dirfrag_damaged(const CDir *dir_frag) const;

  bool is_remote_damaged(const inodeno_t ino) const;

  void dump(Formatter *f) const;

  // Remove one entry by its ID (as shown in dump()); also drops it from
  // the dirfrag/dentry/remote secondary maps.
  void erase(damage_entry_id_t damage_id);

  protected:
  // I need to know my MDS rank so that I can check if
  // metadata items are part of my mydir.
  const mds_rank_t rank;

  // True when the artificial entry-count limit has been reached; callers
  // of the notify_* methods treat that as the fatal condition.
  bool oversized() const;

  // Map of all dirfrags reported damaged
  std::map<DirFragIdent, DamageEntryRef> dirfrags;

  // Store dentries in a map per dirfrag, so that we can
  // readily look up all the bad dentries in a particular
  // dirfrag
  std::map<DirFragIdent, std::map<DentryIdent, DamageEntryRef> > dentries;

  // Map of all inodes which could not be resolved remotely
  // (i.e. have probably/possibly missing backtraces)
  std::map<inodeno_t, DamageEntryRef> remotes;

  // All damage, by ID. This is a secondary index
  // to the dirfrag, dentry, remote maps. It exists
  // to enable external tools to unambiguously operate
  // on particular entries.
  std::map<damage_entry_id_t, DamageEntryRef> by_id;
};
#endif // DAMAGE_TABLE_H_
// ==== dataset row stats (previous file): length=4,777 | avg_line=23.253807 | max_line=80 | ext=h ====
// ==== begin file: ceph-main/src/mds/FSMapUser.h ====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef CEPH_FSMAPCOMPACT_H
#define CEPH_FSMAPCOMPACT_H
#include <map>
#include <string>
#include <string_view>
#include "mds/mdstypes.h"
class FSMapUser {
public:
  // Minimal per-filesystem record: just the name and its cluster id.
  struct fs_info_t {
    fs_info_t() {}
    void encode(ceph::buffer::list& bl, uint64_t features) const;
    void decode(ceph::buffer::list::const_iterator &bl);
    std::string name;
    fs_cluster_id_t cid = FS_CLUSTER_ID_NONE;
  };

  FSMapUser() {}

  epoch_t get_epoch() const { return epoch; }

  // Resolve a filesystem name to its cluster id; returns
  // FS_CLUSTER_ID_NONE when no filesystem has that name.
  fs_cluster_id_t get_fs_cid(std::string_view name) const {
    for (const auto &[cid, info] : filesystems) {
      if (info.name == name)
	return cid;
    }
    return FS_CLUSTER_ID_NONE;
  }

  void encode(ceph::buffer::list& bl, uint64_t features) const;
  void decode(ceph::buffer::list::const_iterator& bl);

  void print(std::ostream& out) const;
  void print_summary(ceph::Formatter *f, std::ostream *out);

  static void generate_test_instances(std::list<FSMapUser*>& ls);

  std::map<fs_cluster_id_t, fs_info_t> filesystems;
  fs_cluster_id_t legacy_client_fscid = FS_CLUSTER_ID_NONE;
  epoch_t epoch = 0;
};
WRITE_CLASS_ENCODER_FEATURES(FSMapUser::fs_info_t)
WRITE_CLASS_ENCODER_FEATURES(FSMapUser)
// Stream a one-line summary of the map (Formatter output disabled).
inline std::ostream& operator<<(std::ostream& out, FSMapUser& m) {
  m.print_summary(NULL, &out);
  return out;
}
#endif
// ==== dataset row stats (previous file): length=1,715 | avg_line=25.4 | max_line=70 | ext=h ====
// ==== begin file: ceph-main/src/mds/InoTable.h ====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_INOTABLE_H
#define CEPH_INOTABLE_H
#include "MDSTable.h"
#include "include/interval_set.h"
class MDSRank;
// Tracks which inode numbers are free vs. in use for this rank.
// Changes go through a project (tentative) / apply (journaled) cycle,
// mirrored by the `free` and `projected_free` interval sets below.
class InoTable : public MDSTable {
public:
  explicit InoTable(MDSRank *m) : MDSTable(m, "inotable", true) {}
  InoTable() : MDSTable(NULL, "inotable", true) {}

  // Tentatively allocate a single ino (or the given one); apply commits it.
  inodeno_t project_alloc_id(inodeno_t id=0);
  void apply_alloc_id(inodeno_t id);

  // Tentatively allocate `want` inos into `inos`; apply commits them.
  void project_alloc_ids(interval_set<inodeno_t>& inos, int want);
  void apply_alloc_ids(interval_set<inodeno_t>& inos);

  // Tentatively/definitively return inos to the free set.
  void project_release_ids(const interval_set<inodeno_t>& inos);
  void apply_release_ids(const interval_set<inodeno_t>& inos);

  // Journal-replay variants: mutate state directly (idempotent on replay).
  void replay_alloc_id(inodeno_t ino);
  void replay_alloc_ids(interval_set<inodeno_t>& inos);
  void replay_release_ids(interval_set<inodeno_t>& inos);
  void replay_reset();
  bool repair(inodeno_t id);
  bool is_marked_free(inodeno_t id) const;
  bool intersects_free(
      const interval_set<inodeno_t> &other,
      interval_set<inodeno_t> *intersection);

  void reset_state() override;
  void encode_state(bufferlist& bl) const override {
    ENCODE_START(2, 2, bl);
    encode(free, bl);
    ENCODE_FINISH(bl);
  }
  void decode_state(bufferlist::const_iterator& bl) override {
    DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl);
    decode(free, bl);
    // decoding resets any in-flight projections to the persisted state
    projected_free = free;
    DECODE_FINISH(bl);
  }

  // To permit enc/decoding in isolation in dencoder
  void encode(bufferlist& bl) const {
    encode_state(bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    decode_state(bl);
  }

  void dump(Formatter *f) const;
  static void generate_test_instances(std::list<InoTable*>& ls);

  void skip_inos(inodeno_t i);

  /**
   * If the specified inode is marked as free, mark it as used.
   * For use in tools, not normal operations.
   *
   * @returns true if the inode was previously marked as free
   */
  bool force_consume(inodeno_t ino)
  {
    if (free.contains(ino)) {
      free.erase(ino);
      return true;
    } else {
      return false;
    }
  }

  /**
   * If this ino is in this rank's range, consume up to and including it.
   * For use in tools, when we know the max ino in use and want to make
   * sure we're only allocating new inodes from above it.
   *
   * @return true if the table was modified
   */
  bool force_consume_to(inodeno_t ino);

private:
  interval_set<inodeno_t> free;   // unused ids
  interval_set<inodeno_t> projected_free;  // free set minus pending (projected) allocations
};
WRITE_CLASS_ENCODER(InoTable)
#endif
// ==== dataset row stats (previous file): length=2,878 | avg_line=26.419048 | max_line=73 | ext=h ====
// ==== begin file: ceph-main/src/mds/JournalPointer.h ====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef JOURNAL_POINTER_H
#define JOURNAL_POINTER_H
#include "include/encoding.h"
#include "mdstypes.h"
class Objecter;
// This always lives in the same location for a given MDS
// instance, it tells the daemon where to look for the journal.
class JournalPointer {
  public:
  JournalPointer(int node_id_, int64_t pool_id_) : node_id(node_id_), pool_id(pool_id_) {}
  JournalPointer() {}

  void encode(bufferlist &bl) const {
    ENCODE_START(1, 1, bl);
    encode(front, bl);
    encode(back, bl);
    ENCODE_FINISH(bl);
  }

  void decode(bufferlist::const_iterator &bl) {
    DECODE_START(1, bl);
    decode(front, bl);
    decode(back, bl);
    DECODE_FINISH(bl);
  }

  // Read/write the pointer object via RADOS; the async save() variant
  // fires `completion` when the write finishes.
  int load(Objecter *objecter);
  int save(Objecter *objecter) const;
  void save(Objecter *objecter, Context *completion) const;

  // True when neither journal ino has been assigned yet.
  bool is_null() const {
    return front == 0 && back == 0;
  }

  void dump(Formatter *f) const {
    f->open_object_section("journal_pointer");
    {
      f->dump_unsigned("front", front);
      f->dump_unsigned("back", back);
    }
    f->close_section(); // journal_header
  }

  static void generate_test_instances(std::list<JournalPointer*> &ls)
  {
    ls.push_back(new JournalPointer());
    ls.push_back(new JournalPointer());
    ls.back()->front = 0xdeadbeef;
    ls.back()->back = 0xfeedbead;
  }

  // The currently active journal
  inodeno_t front = 0;

  // The backup journal, if any (may be 0)
  inodeno_t back = 0;

  private:
  // MDS rank
  int node_id = -1;

  // Metadata pool ID
  int64_t pool_id = -1;

  // Name of the RADOS object holding this pointer (derived from node_id).
  std::string get_object_id() const;
};
WRITE_CLASS_ENCODER(JournalPointer)
#endif // JOURNAL_POINTER_H
// ==== dataset row stats (previous file): length=2,067 | avg_line=23.046512 | max_line=90 | ext=h ====
// ==== begin file: ceph-main/src/mds/LocalLockC.h ====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LOCALLOCK_H
#define CEPH_LOCALLOCK_H
#include "SimpleLock.h"
// A lock that only ever exists locally; it is pinned in LOCK_LOCK state.
class LocalLockC : public SimpleLock {
public:
  LocalLockC(MDSCacheObject *o, LockType *t) :
    SimpleLock(o, t) {
    set_state(LOCK_LOCK); // always.
  }

  bool is_locallock() const override {
    return true;
  }

  // xlockable only while no wrlocks are held and no mutation owns an xlock.
  bool can_xlock_local() const {
    if (is_wrlocked())
      return false;
    return get_xlock_by() == MutationRef();
  }

  bool can_wrlock() const {
    return !is_xlocked();
  }

  // Take a wrlock and remember which client took it last.
  void get_wrlock(client_t client) {
    ceph_assert(can_wrlock());
    SimpleLock::get_wrlock();
    last_wrlock_client = client;
  }

  // Drop a wrlock; forget the last client once none remain.
  void put_wrlock() {
    SimpleLock::put_wrlock();
    if (!get_num_wrlocks())
      last_wrlock_client = client_t();
  }

  client_t get_last_wrlock_client() const {
    return last_wrlock_client;
  }

  void print(std::ostream& out) const override {
    out << "(";
    _print(out);
    if (last_wrlock_client >= 0)
      out << " last_client=" << last_wrlock_client;
    out << ")";
  }

private:
  client_t last_wrlock_client;
};
#endif
// ==== dataset row stats (previous file): length=1,467 | avg_line=21.584615 | max_line=71 | ext=h ====
// ==== begin file: ceph-main/src/mds/LogEvent.h ====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LOGEVENT_H
#define CEPH_LOGEVENT_H
#define EVENT_NEW_ENCODING 0 // indicates that the encoding is versioned
#define EVENT_UNUSED 1 // was previously EVENT_STRING
#define EVENT_SUBTREEMAP 2
#define EVENT_EXPORT 3
#define EVENT_IMPORTSTART 4
#define EVENT_IMPORTFINISH 5
#define EVENT_FRAGMENT 6
#define EVENT_RESETJOURNAL 9
#define EVENT_SESSION 10
#define EVENT_SESSIONS_OLD 11
#define EVENT_SESSIONS 12
#define EVENT_UPDATE 20
#define EVENT_PEERUPDATE 21
#define EVENT_OPEN 22
#define EVENT_COMMITTED 23
#define EVENT_PURGED 24
#define EVENT_TABLECLIENT 42
#define EVENT_TABLESERVER 43
#define EVENT_SUBTREEMAP_TEST 50
#define EVENT_NOOP 51
#include "include/buffer_fwd.h"
#include "include/Context.h"
#include "include/utime.h"
class MDSRank;
class LogSegment;
class EMetaBlob;
// generic log event
// Abstract base for all journal entries; subclasses implement
// encode/decode/replay for a specific EVENT_* type.
class LogEvent {
public:
  typedef __u32 EventType;
  friend class MDLog;

  LogEvent() = delete;
  explicit LogEvent(int t) : _type(t) {}
  LogEvent(const LogEvent&) = delete;
  LogEvent& operator=(const LogEvent&) = delete;
  virtual ~LogEvent() {}

  // Map between EVENT_* codes and their string names.
  std::string_view get_type_str() const;
  static EventType str_to_type(std::string_view str);
  EventType get_type() const { return _type; }
  void set_type(EventType t) { _type = t; }

  uint64_t get_start_off() const { return _start_off; }
  void set_start_off(uint64_t o) { _start_off = o; }

  utime_t get_stamp() const { return stamp; }
  void set_stamp(utime_t t) { stamp = t; }

  // encoding
  virtual void encode(bufferlist& bl, uint64_t features) const = 0;
  virtual void decode(bufferlist::const_iterator &) = 0;
  static std::unique_ptr<LogEvent> decode_event(bufferlist::const_iterator);
  virtual void dump(Formatter *f) const = 0;

  // Prefix the payload with EVENT_NEW_ENCODING and the type tag so
  // decode_event() can dispatch to the right subclass.
  void encode_with_header(bufferlist& bl, uint64_t features) {
    using ceph::encode;
    encode(EVENT_NEW_ENCODING, bl);
    ENCODE_START(1, 1, bl)
    encode(_type, bl);
    this->encode(bl, features);
    ENCODE_FINISH(bl);
  }

  virtual void print(std::ostream& out) const {
    out << "event(" << _type << ")";
  }

  /*** live journal ***/
  /* update_segment() - adjust any state we need to in the LogSegment
   */
  virtual void update_segment() { }

  /*** recovery ***/
  /* replay() - replay given event. this is idempotent.
   */
  virtual void replay(MDSRank *m) { ceph_abort(); }

  /**
   * If the subclass embeds a MetaBlob, return it here so that
   * tools can examine metablobs while traversing lists of LogEvent.
   */
  virtual EMetaBlob *get_metablob() { return NULL; }

protected:
  LogSegment* get_segment() { return _segment; }
  LogSegment const* get_segment() const { return _segment; }

  utime_t stamp;

private:
  static const std::map<std::string, LogEvent::EventType> types;

  static std::unique_ptr<LogEvent> decode_event(bufferlist::const_iterator&, EventType);

  EventType _type = 0;
  uint64_t _start_off = 0;
  LogSegment *_segment = nullptr;
};
// Stream helper: delegates to the event's virtual print().
inline std::ostream& operator<<(std::ostream& out, const LogEvent &le) {
  le.print(out);
  return out;
}
#endif
// ==== dataset row stats (previous file): length=3,523 | avg_line=25.298507 | max_line=88 | ext=h ====
// ==== begin file: ceph-main/src/mds/LogSegment.h ====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LOGSEGMENT_H
#define CEPH_LOGSEGMENT_H
#include "include/elist.h"
#include "include/interval_set.h"
#include "include/Context.h"
#include "MDSContext.h"
#include "mdstypes.h"
#include "CInode.h"
#include "CDentry.h"
#include "CDir.h"
#include "include/unordered_set.h"
using ceph::unordered_set;
class CDir;
class CInode;
class CDentry;
class MDSRank;
struct MDPeerUpdate;
// One segment of the MDS journal: tracks every dirty/uncommitted item
// that must be flushed before the segment can be expired.
class LogSegment {
 public:
  using seq_t = uint64_t;

  LogSegment(uint64_t _seq, loff_t off=-1) :
    seq(_seq), offset(off), end(off),
    dirty_dirfrags(member_offset(CDir, item_dirty)),
    new_dirfrags(member_offset(CDir, item_new)),
    dirty_inodes(member_offset(CInode, item_dirty)),
    dirty_dentries(member_offset(CDentry, item_dirty)),
    open_files(member_offset(CInode, item_open_file)),
    dirty_parent_inodes(member_offset(CInode, item_dirty_parent)),
    dirty_dirfrag_dir(member_offset(CInode, item_dirty_dirfrag_dir)),
    dirty_dirfrag_nest(member_offset(CInode, item_dirty_dirfrag_nest)),
    dirty_dirfrag_dirfragtree(member_offset(CInode, item_dirty_dirfrag_dirfragtree))
  {}

  // Kick off the flushes required to expire this segment; completions
  // are collected in gather_bld.
  void try_to_expire(MDSRank *mds, MDSGatherBuilder &gather_bld, int op_prio);

  // Drop `inos` from the pending-purge set; fire the registered callback
  // once the set drains.
  void purge_inodes_finish(interval_set<inodeno_t>& inos){
    purging_inodes.subtract(inos);
    if (NULL != purged_cb &&
	purging_inodes.empty())
      purged_cb->complete(0);
  }

  // Register the (single) callback to run when purging_inodes empties.
  void set_purged_cb(MDSContext* c){
    ceph_assert(purged_cb == NULL);
    purged_cb = c;
  }

  void wait_for_expiry(MDSContext *c)
  {
    ceph_assert(c != NULL);
    expiry_waiters.push_back(c);
  }

  const seq_t seq;
  uint64_t offset, end;  // journal byte range covered by this segment
  int num_events = 0;

  // dirty items
  elist<CDir*>    dirty_dirfrags, new_dirfrags;
  elist<CInode*>  dirty_inodes;
  elist<CDentry*> dirty_dentries;

  elist<CInode*>  open_files;
  elist<CInode*>  dirty_parent_inodes;
  elist<CInode*>  dirty_dirfrag_dir;
  elist<CInode*>  dirty_dirfrag_nest;
  elist<CInode*>  dirty_dirfrag_dirfragtree;

  std::set<CInode*> truncating_inodes;
  interval_set<inodeno_t> purging_inodes;
  MDSContext* purged_cb = nullptr;

  std::map<int, ceph::unordered_set<version_t> > pending_commit_tids;  // mdstable
  std::set<metareqid_t> uncommitted_leaders;
  std::set<metareqid_t> uncommitted_peers;
  std::set<dirfrag_t> uncommitted_fragments;

  // client request ids
  std::map<int, ceph_tid_t> last_client_tids;

  // potentially dirty sessions
  std::set<entity_name_t> touched_sessions;

  // table version
  version_t inotablev = 0;
  version_t sessionmapv = 0;
  std::map<int,version_t> tablev;

  MDSContext::vec expiry_waiters;
};
// ==== dataset row stats (previous file): length=3,014 | avg_line=26.409091 | max_line=84 | ext=h ====
// ==== begin file: ceph-main/src/mds/MDBalancer.h ====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDBALANCER_H
#define CEPH_MDBALANCER_H
#include "include/types.h"
#include "common/Clock.h"
#include "common/Cond.h"
#include "msg/Message.h"
#include "messages/MHeartbeat.h"
#include "MDSMap.h"
class MDSRank;
class MHeartbeat;
class CInode;
class CDir;
class Messenger;
class MonClient;
// Load balancer for an MDS rank: exchanges MHeartbeat load reports with
// peers and decides which subtrees to export/import and which dirfrags
// to split or merge.
class MDBalancer {
public:
  using clock = ceph::coarse_mono_clock;
  using time = ceph::coarse_mono_time;
  friend class C_Bal_SendHeartbeat;

  MDBalancer(MDSRank *m, Messenger *msgr, MonClient *monc);

  void handle_conf_change(const std::set<std::string>& changed, const MDSMap& mds_map);

  int proc_message(const cref_t<Message> &m);

  /**
   * Regularly called upkeep function.
   *
   * Sends MHeartbeat messages to the mons.
   */
  void tick();

  void handle_export_pins(void);

  // Popularity accounting when subtrees move between ranks.
  void subtract_export(CDir *ex);
  void add_import(CDir *im);
  void adjust_pop_for_rename(CDir *pdir, CDir *dir, bool inc);

  // Record an access (of the given type) against an inode or dirfrag.
  void hit_inode(CInode *in, int type);
  void hit_dir(CDir *dir, int type, double amount=1.0);

  void queue_split(const CDir *dir, bool fast);
  void queue_merge(CDir *dir);
  bool is_fragment_pending(dirfrag_t df) {
    return split_pending.count(df) || merge_pending.count(df);
  }

  /**
   * Based on size and configuration, decide whether to issue a queue_split
   * or queue_merge for this CDir.
   *
   * \param hot whether the directory's temperature is enough to split it
   */
  void maybe_fragment(CDir *dir, bool hot);

  void handle_mds_failure(mds_rank_t who);

  int dump_loads(Formatter *f, int64_t depth = -1) const;

private:
  // Per-rebalance bookkeeping: planned targets plus load already moved.
  typedef struct {
    std::map<mds_rank_t, double> targets;
    std::map<mds_rank_t, double> imported;
    std::map<mds_rank_t, double> exported;
  } balance_state_t;

  //set up the rebalancing targets for export and do one if the
  //MDSMap is up to date
  void prep_rebalance(int beat);
  int mantle_prep_rebalance();

  mds_load_t get_load();
  int localize_balancer();
  void send_heartbeat();
  void handle_heartbeat(const cref_t<MHeartbeat> &m);
  void find_exports(CDir *dir,
                    double amount,
                    std::vector<CDir*>* exports,
                    double& have,
                    std::set<CDir*>& already_exporting);
  double try_match(balance_state_t &state,
                   mds_rank_t ex, double& maxex,
                   mds_rank_t im, double& maxim);

  // Remaining load rank `im` can still absorb / rank `ex` can still shed.
  double get_maxim(balance_state_t &state, mds_rank_t im, double im_target_load) {
    return im_target_load - mds_meta_load[im] - state.imported[im];
  }
  double get_maxex(balance_state_t &state, mds_rank_t ex, double ex_target_load) {
    return mds_meta_load[ex] - ex_target_load - state.exported[ex];
  }

  /**
   * Try to rebalance.
   *
   * Check if the monitor has recorded the current export targets;
   * if it has then do the actual export. Otherwise send off our
   * export targets message again.
   */
  void try_rebalance(balance_state_t& state);

  bool test_rank_mask(mds_rank_t rank);

  bool bal_fragment_dirs;
  int64_t bal_fragment_interval;

  static const unsigned int AUTH_TREES_THRESHOLD = 5;

  MDSRank *mds;
  Messenger *messenger;
  MonClient *mon_client;
  int beat_epoch = 0;

  std::string bal_code;
  std::string bal_version;

  time last_heartbeat = clock::zero();
  time last_sample = clock::zero();
  time rebalance_time = clock::zero(); //ensure a consistent view of load for rebalance

  // Snapshot of counters from the previous get_load() call, used for rates.
  time last_get_load = clock::zero();
  uint64_t last_num_requests = 0;
  uint64_t last_cpu_time = 0;
  uint64_t last_num_traverse = 0;
  uint64_t last_num_traverse_hit = 0;

  // Dirfrags which are marked to be passed on to MDCache::[split|merge]_dir
  // just as soon as a delayed context comes back and triggers it.
  // These sets just prevent us from spawning extra timer contexts for
  // dirfrags that already have one in flight.
  std::set<dirfrag_t> split_pending, merge_pending;

  // per-epoch scatter/gathered info
  std::map<mds_rank_t, mds_load_t> mds_load;
  std::map<mds_rank_t, double> mds_meta_load;
  std::map<mds_rank_t, std::map<mds_rank_t, float> > mds_import_map;
  std::map<mds_rank_t, int> mds_last_epoch_under_map;

  // per-epoch state
  double my_load = 0;
  double target_load = 0;
};
#endif
// ==== dataset row stats (previous file): length=4,646 | avg_line=27.863354 | max_line=87 | ext=h ====
// ==== begin file: ceph-main/src/mds/MDSAuthCaps.h ====
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef MDS_AUTH_CAPS_H
#define MDS_AUTH_CAPS_H
#include <ostream>
#include <string>
#include <string_view>
#include <vector>
#include "include/common_fwd.h"
#include "include/types.h"
#include "common/debug.h"
#include "mdstypes.h"
// unix-style capabilities
enum {
MAY_READ = (1 << 0),
MAY_WRITE = (1 << 1),
MAY_EXECUTE = (1 << 2),
MAY_CHOWN = (1 << 4),
MAY_CHGRP = (1 << 5),
MAY_SET_VXATTR = (1 << 6),
MAY_SNAPSHOT = (1 << 7),
MAY_FULL = (1 << 8),
};
// what we can do
// what we can do
struct MDSCapSpec {
  static const unsigned ALL = (1 << 0);
  static const unsigned READ = (1 << 1);
  static const unsigned WRITE = (1 << 2);
  // if the capability permits setting vxattrs (layout, quota, etc)
  static const unsigned SET_VXATTR = (1 << 3);
  // if the capability permits mksnap/rmsnap
  static const unsigned SNAPSHOT = (1 << 4);
  // if the capability permits to bypass osd full check
  static const unsigned FULL = (1 << 5);

  static const unsigned RW = (READ|WRITE);
  static const unsigned RWF = (READ|WRITE|FULL);
  static const unsigned RWP = (READ|WRITE|SET_VXATTR);
  static const unsigned RWS = (READ|WRITE|SNAPSHOT);
  static const unsigned RWFP = (READ|WRITE|FULL|SET_VXATTR);
  static const unsigned RWFS = (READ|WRITE|FULL|SNAPSHOT);
  static const unsigned RWPS = (READ|WRITE|SET_VXATTR|SNAPSHOT);
  static const unsigned RWFPS = (READ|WRITE|FULL|SET_VXATTR|SNAPSHOT);

  MDSCapSpec() = default;
  // ALL implies every other bit, so expand it eagerly.
  MDSCapSpec(unsigned _caps) : caps(_caps) {
    if (caps & ALL)
      caps |= RWFPS;
  }

  bool allow_all() const {
    return (caps & ALL) != 0;
  }
  bool allow_read() const {
    return (caps & READ) != 0;
  }
  bool allow_write() const {
    return (caps & WRITE) != 0;
  }

  // Permit an (r, w) access pattern when the spec is all-powerful or
  // covers each requested direction individually.
  bool allows(bool r, bool w) const {
    return allow_all() || ((!r || allow_read()) && (!w || allow_write()));
  }

  bool allow_snapshot() const {
    return (caps & SNAPSHOT) != 0;
  }
  bool allow_set_vxattr() const {
    return (caps & SET_VXATTR) != 0;
  }
  bool allow_full() const {
    return (caps & FULL) != 0;
  }

private:
  unsigned caps = 0;
};
// conditions before we are allowed to do it
// conditions before we are allowed to do it
struct MDSCapMatch {
  static const int64_t MDS_AUTH_UID_ANY = -1;

  MDSCapMatch() {}

  /**
   * @param fsname_ filesystem name the grant applies to ("" / "*" = any)
   * @param path_ path prefix the grant is restricted to ("" = whole tree)
   * @param root_squash_ whether root-squash is enabled for this grant
   * @param uid_ required caller uid, or MDS_AUTH_UID_ANY
   * @param gids_ gids accepted alongside the uid match
   */
  MDSCapMatch(const std::string& fsname_, const std::string& path_,
	      bool root_squash_, int64_t uid_=MDS_AUTH_UID_ANY,
	      const std::vector<gid_t>& gids_={}) {
    // The parameters are const references, so the std::move() calls that
    // used to be here were no-ops that silently degraded to copies
    // (clang-tidy: performance-move-const-arg); plain copy-assignment
    // states the real behavior.
    fs_name = fsname_;
    path = path_;
    root_squash = root_squash_;
    // uid 0 is mapped to MDS_AUTH_UID_ANY (-1) — presumably so a root
    // caller is treated as matching any uid; confirm against callers.
    uid = (uid_ == 0) ? -1 : uid_;
    gids = gids_;

    normalize_path();
  }

  // Strip redundant slashes etc. from `path` (defined in MDSAuthCaps.cc).
  void normalize_path();

  bool is_match_all() const
  {
    return uid == MDS_AUTH_UID_ANY && path == "";
  }

  // check whether this grant matches against a given file and caller uid:gid
  bool match(std::string_view target_path,
	     const int caller_uid,
	     const int caller_gid,
	     const std::vector<uint64_t> *caller_gid_list) const;

  /**
   * Check whether this path *might* be accessible (actual permission
   * depends on the stronger check in match()).
   *
   * @param target_path filesystem path without leading '/'
   */
  bool match_path(std::string_view target_path) const;

  // Require UID to be equal to this, if !=MDS_AUTH_UID_ANY
  int64_t uid = MDS_AUTH_UID_ANY;
  std::vector<gid_t> gids;  // Use these GIDs
  std::string path;  // Require path to be child of this (may be "" or "/" for any)
  std::string fs_name;
  bool root_squash=false;
};
// One parsed cap grant: the permitted operations (spec) under the given
// conditions (match), optionally restricted to a client network.
struct MDSCapGrant {
  MDSCapGrant(const MDSCapSpec &spec_, const MDSCapMatch &match_,
	      boost::optional<std::string> n)
    : spec(spec_), match(match_) {
    if (n) {
      // parse_network() fills network_parsed/prefix/valid from the string
      network = *n;
      parse_network();
    }
  }
  MDSCapGrant() {}

  void parse_network();

  MDSCapSpec spec;
  MDSCapMatch match;

  std::string network;

  entity_addr_t network_parsed;
  unsigned network_prefix = 0;
  // stays true when no network restriction was given; parse_network()
  // may clear it for malformed input — confirm in MDSAuthCaps.cc
  bool network_valid = true;
};
class MDSAuthCaps
{
public:
  MDSAuthCaps() = default;

  // this ctor is used by spirit/phoenix
  explicit MDSAuthCaps(const std::vector<MDSCapGrant>& grants_) : grants(grants_) {}

  void clear() {
    grants.clear();
  }

  void set_allow_all();
  bool parse(std::string_view str, std::ostream *err);

  bool allow_all() const;
  bool is_capable(std::string_view inode_path,
		  uid_t inode_uid, gid_t inode_gid, unsigned inode_mode,
		  uid_t uid, gid_t gid, const std::vector<uint64_t> *caller_gid_list,
		  unsigned mask, uid_t new_uid, gid_t new_gid,
		  const entity_addr_t& addr) const;
  bool path_capable(std::string_view inode_path) const;

  // True when some grant names this filesystem (or any filesystem) and
  // permits at least one of the read/write bits requested in `mask`.
  bool fs_name_capable(std::string_view fs_name, unsigned mask) const {
    if (allow_all()) {
      return true;
    }

    for (const MDSCapGrant &g : grants) {
      const bool name_matches = g.match.fs_name == fs_name ||
				g.match.fs_name.empty() ||
				g.match.fs_name == "*";
      if (!name_matches) {
	continue;
      }
      if ((mask & MAY_READ) && g.spec.allow_read()) {
	return true;
      }
      if ((mask & MAY_WRITE) && g.spec.allow_write()) {
	return true;
      }
    }

    return false;
  }

  friend std::ostream &operator<<(std::ostream &out, const MDSAuthCaps &cap);

private:
  std::vector<MDSCapGrant> grants;
};
std::ostream &operator<<(std::ostream &out, const MDSCapMatch &match);
std::ostream &operator<<(std::ostream &out, const MDSCapSpec &spec);
std::ostream &operator<<(std::ostream &out, const MDSCapGrant &grant);
std::ostream &operator<<(std::ostream &out, const MDSAuthCaps &cap);
#endif // MDS_AUTH_CAPS_H
// ==== dataset row stats (previous file): length=5,844 | avg_line=24.977778 | max_line=84 | ext=h ====
// ==== begin file: ceph-main/src/mds/MDSCacheObject.h ====
#ifndef CEPH_MDSCACHEOBJECT_H
#define CEPH_MDSCACHEOBJECT_H
#include <ostream>
#include <string_view>
#include "common/config.h"
#include "include/Context.h"
#include "include/ceph_assert.h"
#include "include/mempool.h"
#include "include/types.h"
#include "include/xlist.h"
#include "mdstypes.h"
#include "MDSContext.h"
#include "include/elist.h"
#define MDS_REF_SET // define me for improved debug output, sanity checking
//#define MDS_AUTHPIN_SET // define me for debugging auth pin leaks
//#define MDS_VERIFY_FRAGSTAT // do (slow) sanity checking on frags
/*
* for metadata leases to clients
*/
class MLock;
class SimpleLock;
class MDSCacheObject;
class MDSContext;
namespace ceph {
class Formatter;
}
// A metadata lease issued to one client on one cache object (see the
// "for metadata leases to clients" note above); linked into both a
// per-session list and a global list via the embedded xlist items.
struct ClientLease {
  ClientLease(client_t c, MDSCacheObject *p) :
    client(c), parent(p),
    item_session_lease(this),
    item_lease(this) { }
  ClientLease() = delete;

  client_t client;              // the client holding the lease
  MDSCacheObject *parent;       // the object the lease covers
  ceph_seq_t seq = 0;           // lease sequence number
  utime_t ttl;                  // expiry time
  xlist<ClientLease*>::item item_session_lease; // per-session list
  xlist<ClientLease*>::item item_lease;         // global list
};
// print hack
// print hack: wraps an object so a stream insertion can emit its
// debug-line prefix (see print_db_line_prefix()).
struct mdsco_db_line_prefix {
  explicit mdsco_db_line_prefix(MDSCacheObject *o) : object(o) {}
  MDSCacheObject *object;
};
class MDSCacheObject {
public:
typedef mempool::mds_co::compact_map<mds_rank_t,unsigned> replica_map_type;
struct ptr_lt {
bool operator()(const MDSCacheObject* l, const MDSCacheObject* r) const {
return l->is_lt(r);
}
};
// -- pins --
const static int PIN_REPLICATED = 1000;
const static int PIN_DIRTY = 1001;
const static int PIN_LOCK = -1002;
const static int PIN_REQUEST = -1003;
const static int PIN_WAITER = 1004;
const static int PIN_DIRTYSCATTERED = -1005;
static const int PIN_AUTHPIN = 1006;
static const int PIN_PTRWAITER = -1007;
const static int PIN_TEMPEXPORTING = 1008; // temp pin between encode_ and finish_export
static const int PIN_CLIENTLEASE = 1009;
static const int PIN_DISCOVERBASE = 1010;
static const int PIN_SCRUBQUEUE = 1011; // for scrub of inode and dir
// -- state --
const static int STATE_AUTH = (1<<30);
const static int STATE_DIRTY = (1<<29);
const static int STATE_NOTIFYREF = (1<<28); // notify dropping ref drop through _put()
const static int STATE_REJOINING = (1<<27); // replica has not joined w/ primary copy
const static int STATE_REJOINUNDEF = (1<<26); // contents undefined.
// -- wait --
const static uint64_t WAIT_ORDERED = (1ull<<61);
const static uint64_t WAIT_SINGLEAUTH = (1ull<<60);
const static uint64_t WAIT_UNFREEZE = (1ull<<59); // pka AUTHPINNABLE
elist<MDSCacheObject*>::item item_scrub; // for scrub inode or dir
MDSCacheObject() {}
virtual ~MDSCacheObject() {}
std::string_view generic_pin_name(int p) const;
// printing
virtual void print(std::ostream& out) = 0;
virtual std::ostream& print_db_line_prefix(std::ostream& out) {
return out << "mdscacheobject(" << this << ") ";
}
unsigned get_state() const { return state; }
unsigned state_test(unsigned mask) const { return (state & mask); }
void state_clear(unsigned mask) { state &= ~mask; }
void state_set(unsigned mask) { state |= mask; }
void state_reset(unsigned s) { state = s; }
bool is_auth() const { return state_test(STATE_AUTH); }
bool is_dirty() const { return state_test(STATE_DIRTY); }
bool is_clean() const { return !is_dirty(); }
bool is_rejoining() const { return state_test(STATE_REJOINING); }
// --------------------------------------------
// authority
virtual mds_authority_t authority() const = 0;
virtual bool is_ambiguous_auth() const {
return authority().second != CDIR_AUTH_UNKNOWN;
}
int get_num_ref(int by = -1) const {
#ifdef MDS_REF_SET
if (by >= 0) {
if (ref_map.find(by) == ref_map.end()) {
return 0;
} else {
return ref_map.find(by)->second;
}
}
#endif
return ref;
}
virtual std::string_view pin_name(int by) const = 0;
//bool is_pinned_by(int by) { return ref_set.count(by); }
//multiset<int>& get_ref_set() { return ref_set; }
virtual void last_put() {}
virtual void bad_put(int by) {
#ifdef MDS_REF_SET
ceph_assert(ref_map[by] > 0);
#endif
ceph_assert(ref > 0);
}
virtual void _put() {}
  // Drop one reference of type `by`; calls bad_put() on underflow,
  // last_put() when the total count hits zero, and _put() when the
  // object asked to be notified of every put (STATE_NOTIFYREF).
  void put(int by) {
#ifdef MDS_REF_SET
    if (ref == 0 || ref_map[by] == 0) {
#else
    if (ref == 0) {
#endif
      bad_put(by);
    } else {
      ref--;
#ifdef MDS_REF_SET
      ref_map[by]--;
#endif
      if (ref == 0)
        last_put();
      if (state_test(STATE_NOTIFYREF))
        _put();
    }
  }
virtual void first_get() {}
virtual void bad_get(int by) {
#ifdef MDS_REF_SET
ceph_assert(by < 0 || ref_map[by] == 0);
#endif
ceph_abort();
}
void get(int by) {
if (ref == 0)
first_get();
ref++;
#ifdef MDS_REF_SET
if (ref_map.find(by) == ref_map.end())
ref_map[by] = 0;
ref_map[by]++;
#endif
}
  // Append the per-type pin counts (or just the raw refcount when
  // MDS_REF_SET accounting is disabled) to `out` for debug printing.
  void print_pin_set(std::ostream& out) const {
#ifdef MDS_REF_SET
    for(auto const &p : ref_map) {
      out << " " << pin_name(p.first) << "=" << p.second;
    }
#else
    out << " nref=" << ref;
#endif
  }
  // ---- auth pins ----
  int get_num_auth_pins() const { return auth_pins; }
#ifdef MDS_AUTHPIN_SET
  void print_authpin_set(std::ostream& out) const {
    out << " (" << auth_pin_set << ")";
  }
#endif
  void dump_states(ceph::Formatter *f) const;
  void dump(ceph::Formatter *f) const;
  // auth pins
  enum {
    // can_auth_pin() error codes
    ERR_NOT_AUTH = 1,
    ERR_EXPORTING_TREE,
    ERR_FRAGMENTING_DIR,
    ERR_EXPORTING_INODE,
  };
  // On failure, *err_code (if provided) is set to one of the codes above.
  virtual bool can_auth_pin(int *err_code=nullptr) const = 0;
  virtual void auth_pin(void *who) = 0;
  virtual void auth_unpin(void *who) = 0;
  virtual bool is_frozen() const = 0;
  virtual bool is_freezing() const = 0;
  virtual bool is_freezing_or_frozen() const {
    return is_frozen() || is_freezing();
  }
  // ---- replication (across the mds cluster) ----
  bool is_replicated() const { return !get_replicas().empty(); }
  bool is_replica(mds_rank_t mds) const { return get_replicas().count(mds); }
  int num_replicas() const { return get_replicas().size(); }
  // Add (or re-add) a replica; takes PIN_REPLICATED on the first replica.
  // Returns the (possibly incremented) nonce for `mds`.
  unsigned add_replica(mds_rank_t mds) {
    if (get_replicas().count(mds))
      return ++get_replicas()[mds];  // inc nonce
    if (get_replicas().empty())
      get(PIN_REPLICATED);
    return get_replicas()[mds] = 1;
  }
  void add_replica(mds_rank_t mds, unsigned nonce) {
    if (get_replicas().empty())
      get(PIN_REPLICATED);
    get_replicas()[mds] = nonce;
  }
  unsigned get_replica_nonce(mds_rank_t mds) {
    ceph_assert(get_replicas().count(mds));
    return get_replicas()[mds];
  }
  // Remove a replica; drops PIN_REPLICATED once the last replica is gone.
  void remove_replica(mds_rank_t mds) {
    ceph_assert(get_replicas().count(mds));
    get_replicas().erase(mds);
    if (get_replicas().empty()) {
      put(PIN_REPLICATED);
    }
  }
  void clear_replica_map() {
    if (!get_replicas().empty())
      put(PIN_REPLICATED);
    replica_map.clear();
  }
  replica_map_type& get_replicas() { return replica_map; }
  const replica_map_type& get_replicas() const { return replica_map; }
  void list_replicas(std::set<mds_rank_t>& ls) const {
    for (const auto &p : get_replicas()) {
      ls.insert(p.first);
    }
  }
  unsigned get_replica_nonce() const { return replica_nonce; }
  void set_replica_nonce(unsigned n) { replica_nonce = n; }
  // ---- waiters ----
  bool is_waiter_for(uint64_t mask, uint64_t min=0);
  // Register a waiter context for `mask`; takes PIN_WAITER for the first one.
  // WAIT_ORDERED waiters additionally record a sequence number so they can be
  // woken in registration order.
  virtual void add_waiter(uint64_t mask, MDSContext *c) {
    if (waiting.empty())
      get(PIN_WAITER);
    uint64_t seq = 0;
    if (mask & WAIT_ORDERED) {
      seq = ++last_wait_seq;
      mask &= ~WAIT_ORDERED;
    }
    waiting.insert(std::pair<uint64_t, std::pair<uint64_t, MDSContext*> >(
			mask,
			std::pair<uint64_t, MDSContext*>(seq, c)));
//    pdout(10,g_conf()->debug_mds) << (mdsco_db_line_prefix(this))
//			       << "add_waiter " << hex << mask << dec << " " << c
//			       << " on " << *this
//			       << dendl;
  }
  virtual void take_waiting(uint64_t mask, MDSContext::vec& ls);
  void finish_waiting(uint64_t mask, int result = 0);
  // ---------------------------------------------
  // locking
  // noop unless overloaded.
  // Lock-related hooks; the defaults abort, so only subclasses that actually
  // carry locks (inodes/dentries/dirs) implement them.
  virtual SimpleLock* get_lock(int type) { ceph_abort(); return 0; }
  virtual void set_object_info(MDSCacheObjectInfo &info) { ceph_abort(); }
  virtual void encode_lock_state(int type, ceph::buffer::list& bl) { ceph_abort(); }
  virtual void decode_lock_state(int type, const ceph::buffer::list& bl) { ceph_abort(); }
  virtual void finish_lock_waiters(int type, uint64_t mask, int r=0) { ceph_abort(); }
  virtual void add_lock_waiter(int type, uint64_t mask, MDSContext *c) { ceph_abort(); }
  virtual bool is_lock_waiting(int type, uint64_t mask) { ceph_abort(); return false; }
  virtual void clear_dirty_scattered(int type) { ceph_abort(); }
  // ---------------------------------------------
  // ordering
  virtual bool is_lt(const MDSCacheObject *r) const = 0;
  // state
 protected:
  __u32 state = 0;     // state bits
  // pins
  __s32 ref = 0;       // reference count
#ifdef MDS_REF_SET
  mempool::mds_co::flat_map<int,int> ref_map;  // per-pin-type counts
#endif
  int auth_pins = 0;
#ifdef MDS_AUTHPIN_SET
  mempool::mds_co::multiset<void*> auth_pin_set;
#endif
  // replication (across mds cluster)
  unsigned replica_nonce = 0;  // [replica] defined on replica
  replica_map_type replica_map;  // [auth] mds -> nonce
  // ---------------------------------------------
  // waiting
 private:
  // mask -> (ordering seq, waiter); seq is 0 for unordered waiters
  mempool::mds_co::compact_multimap<uint64_t, std::pair<uint64_t, MDSContext*>> waiting;
  static uint64_t last_wait_seq;
};
std::ostream& operator<<(std::ostream& out, const mdsco_db_line_prefix& o);
// printer
std::ostream& operator<<(std::ostream& out, const MDSCacheObject &o);
// Non-const overload delegates to the object's virtual print().
inline std::ostream& operator<<(std::ostream& out, MDSCacheObject &o) {
  o.print(out);
  return out;
}
// Debug-line prefix printer: forwards to the wrapped object.
inline std::ostream& operator<<(std::ostream& out, const mdsco_db_line_prefix& o) {
  o.object->print_db_line_prefix(out);
  return out;
}
#endif
| 9,987 | 28.119534 | 91 | h |
null | ceph-main/src/mds/MDSContext.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef MDS_CONTEXT_H
#define MDS_CONTEXT_H
#include <vector>
#include <deque>
#include "include/Context.h"
#include "include/elist.h"
#include "include/spinlock.h"
#include "common/ceph_time.h"
class MDSRank;
/**
* Completion which has access to a reference to the global MDS instance.
*
* This class exists so that Context subclasses can provide the MDS pointer
* from a pointer they already had, e.g. MDCache or Locker, rather than
* necessarily having to carry around an extra MDS* pointer.
*/
class MDSContext : public Context
{
public:
  // Convenience container aliases for lists/queues of completions.
  template<template<typename> class A>
  using vec_alloc = std::vector<MDSContext*, A<MDSContext*>>;
  using vec = vec_alloc<std::allocator>;
  template<template<typename> class A>
  using que_alloc = std::deque<MDSContext*, A<MDSContext*>>;
  using que = que_alloc<std::allocator>;
  void complete(int r) override;
  // Subclasses supply the MDSRank this completion runs against.
  virtual MDSRank *get_mds() = 0;
};
/* Children of this could have used multiple inheritance with MDSHolder and
* MDSContext but then get_mds() would be ambiguous.
*/
// Mixin that stores a non-null MDSRank* and implements get_mds() for any
// context base T (see comment above re: avoiding multiple inheritance).
template<class T>
class MDSHolder : public T
{
public:
  MDSRank* get_mds() override {
    return mds;
  }
protected:
  MDSHolder() = delete;
  // Asserts the rank pointer is non-null at construction.
  MDSHolder(MDSRank* mds) : mds(mds) {
    ceph_assert(mds != nullptr);
  }
  MDSRank* mds;
};
/**
* General purpose, lets you pass in an MDS pointer.
*/
// General-purpose internal completion; caller passes the MDS pointer in.
class MDSInternalContext : public MDSHolder<MDSContext>
{
public:
  MDSInternalContext() = delete;
protected:
  explicit MDSInternalContext(MDSRank *mds_) : MDSHolder(mds_) {}
};
/**
* Wrap a regular Context up as an Internal context. Useful
* if you're trying to work with one of our more generic frameworks.
*/
// Adapts a plain Context `fin` so it can be queued as an MDSInternalContext.
class MDSInternalContextWrapper : public MDSInternalContext
{
protected:
  Context *fin = nullptr;
  // Runs the wrapped context (implementation elsewhere).
  void finish(int r) override;
public:
  MDSInternalContextWrapper(MDSRank *m, Context *c) : MDSInternalContext(m), fin(c) {}
};
// Base for I/O completions; non-copyable. When `track` is true the instance
// is linked (via list_item) so in-flight I/Os can be inspected for slowness.
class MDSIOContextBase : public MDSContext
{
public:
  MDSIOContextBase(bool track=true);
  virtual ~MDSIOContextBase();
  MDSIOContextBase(const MDSIOContextBase&) = delete;
  MDSIOContextBase& operator=(const MDSIOContextBase&) = delete;
  void complete(int r) override;
  virtual void print(std::ostream& out) const = 0;
  // Reports I/Os created before `cutoff`; fills slow_count and the oldest
  // creation time.
  static bool check_ios_in_flight(ceph::coarse_mono_time cutoff,
				  std::string& slow_count,
				  ceph::coarse_mono_time& oldest);
private:
  ceph::coarse_mono_time created_at;  // when this I/O context was created
  elist<MDSIOContextBase*>::item list_item;
  friend struct MDSIOContextList;
};
/**
* Completion for an log operation, takes big MDSRank lock
* before executing finish function. Update log's safe pos
* after finish function return.
*/
class MDSLogContextBase : public MDSIOContextBase
{
protected:
  uint64_t write_pos = 0;  // journal position this completion covers
public:
  MDSLogContextBase() = default;
  void complete(int r) final;
  void set_write_pos(uint64_t wp) { write_pos = wp; }
  // Hook run before finish(); default is a no-op.
  virtual void pre_finish(int r) {}
  void print(std::ostream& out) const override {
    out << "log_event(" << write_pos << ")";
  }
};
/**
* Completion for an I/O operation, takes big MDSRank lock
* before executing finish function.
*/
// I/O completion bound to a specific MDSRank (see class comment above).
class MDSIOContext : public MDSHolder<MDSIOContextBase>
{
public:
  explicit MDSIOContext(MDSRank *mds_) : MDSHolder(mds_) {}
};
/**
* Wrap a regular Context up as an IO Context. Useful
* if you're trying to work with one of our more generic frameworks.
*/
// Adapts a plain Context `fin` so it can run as an MDS I/O completion.
class MDSIOContextWrapper : public MDSHolder<MDSIOContextBase>
{
protected:
  Context *fin;
public:
  MDSIOContextWrapper(MDSRank *m, Context *c) : MDSHolder(m), fin(c) {}
  void finish(int r) override;
  void print(std::ostream& out) const override {
    out << "io_context_wrapper(" << fin << ")";
  }
};
/**
* No-op for callers expecting MDSInternalContext
*/
// No-op completion: finish() does nothing, complete() just self-deletes.
// get_mds() aborts because no rank is associated.
class C_MDSInternalNoop : public MDSContext
{
public:
  void finish(int r) override {}
  void complete(int r) override { delete this; }
protected:
  MDSRank* get_mds() override final {ceph_abort();}
};
/**
* This class is used where you have an MDSInternalContext but
* you sometimes want to call it back from an I/O completion.
*/
class C_IO_Wrapper : public MDSIOContext
{
protected:
  bool async;           // see complete() implementation elsewhere
  MDSContext *wrapped;  // owned until handed off in finish()
  void finish(int r) override {
    // Ownership transfers to complete(); null it so the dtor won't delete.
    wrapped->complete(r);
    wrapped = nullptr;
  }
public:
  C_IO_Wrapper(MDSRank *mds_, MDSContext *wrapped_) :
    MDSIOContext(mds_), async(true), wrapped(wrapped_) {
    ceph_assert(wrapped != NULL);
  }
  ~C_IO_Wrapper() override {
    // Delete the wrapped context only if it was never completed.
    if (wrapped != nullptr) {
      delete wrapped;
      wrapped = nullptr;
    }
  }
  void complete(int r) final;
  void print(std::ostream& out) const override {
    out << "io_wrapper(" << wrapped << ")";
  }
};
using MDSGather = C_GatherBase<MDSContext, C_MDSInternalNoop>;
using MDSGatherBuilder = C_GatherBuilderBase<MDSContext, MDSGather>;
using MDSContextFactory = ContextFactory<MDSContext>;
#endif // MDS_CONTEXT_H
| 5,255 | 23.676056 | 86 | h |
null | ceph-main/src/mds/MDSContinuation.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/Continuation.h"
#include "mds/Mutation.h"
#include "mds/Server.h"
#include "MDSContext.h"
// Continuation whose stage callbacks are wrapped as MDS internal/IO contexts
// bound to the owning Server's MDS.
class MDSContinuation : public Continuation {
protected:
  Server *server;
  // Wrap the stage callback so it runs under the MDS lock.
  MDSInternalContext *get_internal_callback(int stage) {
    return new MDSInternalContextWrapper(server->mds, get_callback(stage));
  }
  // Wrap the stage callback as an I/O completion.
  MDSIOContextBase *get_io_callback(int stage) {
    return new MDSIOContextWrapper(server->mds, get_callback(stage));
  }
public:
  MDSContinuation(Server *s) :
    Continuation(NULL), server(s) {}
};
| 924 | 26.205882 | 75 | h |
null | ceph-main/src/mds/MDSDaemon.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_H
#define CEPH_MDS_H
#include <string_view>
#include "messages/MCommand.h"
#include "messages/MCommandReply.h"
#include "messages/MGenericMessage.h"
#include "messages/MMDSMap.h"
#include "messages/MMonCommand.h"
#include "common/LogClient.h"
#include "common/ceph_mutex.h"
#include "common/fair_mutex.h"
#include "common/Timer.h"
#include "include/Context.h"
#include "include/types.h"
#include "mgr/MgrClient.h"
#include "msg/Dispatcher.h"
#include "Beacon.h"
#include "MDSMap.h"
#include "MDSRank.h"
#define CEPH_MDS_PROTOCOL 36 /* cluster internal */
class Messenger;
class MonClient;
// The MDS daemon process: owns messengers/clients, the beacon, and (once a
// rank is assigned) an MDSRankDispatcher that does the actual metadata work.
class MDSDaemon : public Dispatcher {
 public:
  MDSDaemon(std::string_view n, Messenger *m, MonClient *mc,
	    boost::asio::io_context& ioctx);
  ~MDSDaemon() override;
  mono_time get_starttime() const {
    return starttime;
  }
  std::chrono::duration<double> get_uptime() const {
    mono_time now = mono_clock::now();
    return std::chrono::duration<double>(now-starttime);
  }
  // handle a signal (e.g., SIGTERM)
  void handle_signal(int signum);
  int init();
  /**
   * Hint at whether we were shutdown gracefully (i.e. we were only
   * in standby, or our rank was stopped). Should be removed once
   * we handle shutdown properly (e.g. clear out all message queues)
   * such that deleting xlists doesn't assert.
   */
  bool is_clean_shutdown();
  /* Global MDS lock: every time someone takes this, they must
   * also check the `stopping` flag.  If stopping is true, you
   * must either do nothing and immediately drop the lock, or
   * never drop the lock again (i.e. call respawn()) */
  ceph::fair_mutex mds_lock{"MDSDaemon::mds_lock"};
  bool stopping = false;
  class CommonSafeTimer<ceph::fair_mutex> timer;
  std::string gss_ktfile_client{};
  // Original process arguments, kept so respawn() can re-exec.
  int orig_argc;
  const char **orig_argv;
 protected:
  // admin socket handling
  friend class MDSSocketHook;
  // special message types
  friend class C_MDS_Send_Command_Reply;
  void reset_tick();
  void wait_for_omap_osds();
  void set_up_admin_socket();
  void clean_up_admin_socket();
  void check_ops_in_flight(); // send off any slow ops to monitor
  void asok_command(
    std::string_view command,
    const cmdmap_t& cmdmap,
    Formatter *f,
    const bufferlist &inbl,
    std::function<void(int,const std::string&,bufferlist&)> on_finish);
  void dump_status(Formatter *f);
  /**
   * Terminate this daemon process.
   *
   * This function will return, but once it does so the calling thread
   * must do no more work as all subsystems will have been shut down.
   */
  void suicide();
  /**
   * Start a new daemon process with the same command line parameters that
   * this process was run with, then terminate this process
   */
  void respawn();
  void tick();
  bool handle_core_message(const cref_t<Message> &m);
  void handle_command(const cref_t<MCommand> &m);
  void handle_mds_map(const cref_t<MMDSMap> &m);
  Beacon beacon;
  std::string name;
  Messenger *messenger;
  MonClient *monc;
  boost::asio::io_context& ioctx;
  MgrClient mgrc;
  std::unique_ptr<MDSMap> mdsmap;
  LogClient log_client;
  LogChannelRef clog;
  // Non-null only while this daemon holds an active rank.
  MDSRankDispatcher *mds_rank = nullptr;
  // tick and other timer fun
  Context *tick_event = nullptr;
  class MDSSocketHook *asok_hook = nullptr;
 private:
  bool ms_dispatch2(const ref_t<Message> &m) override;
  int ms_handle_authentication(Connection *con) override;
  void ms_handle_accept(Connection *con) override;
  void ms_handle_connect(Connection *con) override;
  bool ms_handle_reset(Connection *con) override;
  void ms_handle_remote_reset(Connection *con) override;
  bool ms_handle_refused(Connection *con) override;
  bool parse_caps(const AuthCapsInfo&, MDSAuthCaps&);
  mono_time starttime = mono_clock::zero();
};
#endif
| 4,242 | 25.191358 | 74 | h |
null | ceph-main/src/mds/MDSPerfMetricTypes.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_MDS_PERF_METRIC_TYPES_H
#define CEPH_MDS_PERF_METRIC_TYPES_H
#include <ostream>
#include "include/denc.h"
#include "include/utime.h"
#include "mdstypes.h"
enum UpdateType : uint32_t {
UPDATE_TYPE_REFRESH = 0,
UPDATE_TYPE_REMOVE,
};
// Client-side capability cache hit/miss counters.
struct CapHitMetric {
  uint64_t hits = 0;
  uint64_t misses = 0;
  DENC(CapHitMetric, v, p) {
    DENC_START(1, 1, p);
    denc(v.hits, p);
    denc(v.misses, p);
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const {
    f->dump_unsigned("hits", hits);
    f->dump_unsigned("misses", misses);
  }
  friend std::ostream& operator<<(std::ostream& os, const CapHitMetric &metric) {
    os << "{hits=" << metric.hits << ", misses=" << metric.misses << "}";
    return os;
  }
};
// Client read-latency sample plus running statistics (mean, sum of squares,
// sample count) added in struct_v 3.
struct ReadLatencyMetric {
  utime_t lat;           // latest read latency sample
  utime_t mean;          // running mean latency
  uint64_t sq_sum = 0;   // running sum of squares (for variance)
  uint64_t count = 0;    // number of samples folded into mean/sq_sum
  bool updated = false;
  // Default-initialize sq_sum/count: without this, decoding from a peer
  // sending struct_v < 3 (or dumping a default-constructed instance) would
  // read indeterminate values.  Matches the convention of the other metric
  // structs in this header (e.g. ReadIoSizesMetric).
  DENC(ReadLatencyMetric, v, p) {
    DENC_START(3, 1, p);
    denc(v.lat, p);
    if (struct_v >= 2)
      denc(v.updated, p);
    if (struct_v >= 3) {
      denc(v.mean, p);
      denc(v.sq_sum, p);
      denc(v.count, p);
    }
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const {
    f->dump_object("read_latency", lat);
    // NOTE(review): "alatency" is a typo in the emitted key; kept as-is
    // because external consumers may already parse this field name.
    f->dump_object("avg_read_alatency", mean);
    f->dump_unsigned("sq_sum", sq_sum);
    f->dump_unsigned("count", count);
  }
  friend std::ostream& operator<<(std::ostream& os, const ReadLatencyMetric &metric) {
    os << "{latency=" << metric.lat << ", avg_latency=" << metric.mean
       << ", sq_sum=" << metric.sq_sum << ", count=" << metric.count << "}";
    return os;
  }
};
// Client write-latency sample plus running statistics (mean, sum of squares,
// sample count) added in struct_v 3.
struct WriteLatencyMetric {
  utime_t lat;           // latest write latency sample
  utime_t mean;          // running mean latency
  uint64_t sq_sum = 0;   // running sum of squares (for variance)
  uint64_t count = 0;    // number of samples folded into mean/sq_sum
  bool updated = false;
  // Default-initialize sq_sum/count: without this, decoding from a peer
  // sending struct_v < 3 (or dumping a default-constructed instance) would
  // read indeterminate values.  Matches the sibling metric structs.
  DENC(WriteLatencyMetric, v, p) {
    DENC_START(3, 1, p);
    denc(v.lat, p);
    if (struct_v >= 2)
      denc(v.updated, p);
    if (struct_v >= 3) {
      denc(v.mean, p);
      denc(v.sq_sum, p);
      denc(v.count, p);
    }
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const {
    f->dump_object("write_latency", lat);
    // NOTE(review): "alatency" is a typo in the emitted key; kept as-is
    // because external consumers may already parse this field name.
    f->dump_object("avg_write_alatency", mean);
    f->dump_unsigned("sq_sum", sq_sum);
    f->dump_unsigned("count", count);
  }
  friend std::ostream& operator<<(std::ostream& os, const WriteLatencyMetric &metric) {
    os << "{latency=" << metric.lat << ", avg_latency=" << metric.mean
       << ", sq_sum=" << metric.sq_sum << ", count=" << metric.count << "}";
    return os;
  }
};
// Client metadata-op latency sample plus running statistics (mean, sum of
// squares, sample count) added in struct_v 3.
struct MetadataLatencyMetric {
  utime_t lat;           // latest metadata-op latency sample
  utime_t mean;          // running mean latency
  uint64_t sq_sum = 0;   // running sum of squares (for variance)
  uint64_t count = 0;    // number of samples folded into mean/sq_sum
  bool updated = false;
  // Default-initialize sq_sum/count: without this, decoding from a peer
  // sending struct_v < 3 (or dumping a default-constructed instance) would
  // read indeterminate values.  Matches the sibling metric structs.
  DENC(MetadataLatencyMetric, v, p) {
    DENC_START(3, 1, p);
    denc(v.lat, p);
    if (struct_v >= 2)
      denc(v.updated, p);
    if (struct_v >= 3) {
      denc(v.mean, p);
      denc(v.sq_sum, p);
      denc(v.count, p);
    }
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const {
    f->dump_object("metadata_latency", lat);
    // NOTE(review): "alatency" is a typo in the emitted key; kept as-is
    // because external consumers may already parse this field name.
    f->dump_object("avg_metadata_alatency", mean);
    f->dump_unsigned("sq_sum", sq_sum);
    f->dump_unsigned("count", count);
  }
  friend std::ostream& operator<<(std::ostream& os, const MetadataLatencyMetric &metric) {
    os << "{latency=" << metric.lat << ", avg_latency=" << metric.mean
       << ", sq_sum=" << metric.sq_sum << ", count=" << metric.count << "}";
    return os;
  }
};
// Client dentry-lease hit/miss counters.
struct DentryLeaseHitMetric {
  uint64_t hits = 0;
  uint64_t misses = 0;
  bool updated = false;
  DENC(DentryLeaseHitMetric, v, p) {
    DENC_START(1, 1, p);
    denc(v.hits, p);
    denc(v.misses, p);
    denc(v.updated, p);
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const {
    f->dump_unsigned("hits", hits);
    f->dump_unsigned("misses", misses);
  }
  friend std::ostream& operator<<(std::ostream& os, const DentryLeaseHitMetric &metric) {
    os << "{hits=" << metric.hits << ", misses=" << metric.misses << "}";
    return os;
  }
};
// Count of files the client holds open, against its total cached inodes.
struct OpenedFilesMetric {
  uint64_t opened_files = 0;
  uint64_t total_inodes = 0;
  bool updated = false;
  DENC(OpenedFilesMetric, v, p) {
    DENC_START(1, 1, p);
    denc(v.opened_files, p);
    denc(v.total_inodes, p);
    denc(v.updated, p);
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const {
    f->dump_unsigned("opened_files", opened_files);
    f->dump_unsigned("total_inodes", total_inodes);
  }
  friend std::ostream& operator<<(std::ostream& os, const OpenedFilesMetric &metric) {
    os << "{opened_files=" << metric.opened_files << ", total_inodes="
       << metric.total_inodes << "}";
    return os;
  }
};
// Count of pinned inode caps, against the client's total cached inodes.
struct PinnedIcapsMetric {
  uint64_t pinned_icaps = 0;
  uint64_t total_inodes = 0;
  bool updated = false;
  DENC(PinnedIcapsMetric, v, p) {
    DENC_START(1, 1, p);
    denc(v.pinned_icaps, p);
    denc(v.total_inodes, p);
    denc(v.updated, p);
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const {
    f->dump_unsigned("pinned_icaps", pinned_icaps);
    f->dump_unsigned("total_inodes", total_inodes);
  }
  friend std::ostream& operator<<(std::ostream& os, const PinnedIcapsMetric &metric) {
    os << "{pinned_icaps=" << metric.pinned_icaps << ", total_inodes="
       << metric.total_inodes << "}";
    return os;
  }
};
// Count of opened inodes, against the client's total cached inodes.
struct OpenedInodesMetric {
  uint64_t opened_inodes = 0;
  uint64_t total_inodes = 0;
  bool updated = false;
  DENC(OpenedInodesMetric, v, p) {
    DENC_START(1, 1, p);
    denc(v.opened_inodes, p);
    denc(v.total_inodes, p);
    denc(v.updated, p);
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const {
    f->dump_unsigned("opened_inodes", opened_inodes);
    f->dump_unsigned("total_inodes", total_inodes);
  }
  friend std::ostream& operator<<(std::ostream& os, const OpenedInodesMetric &metric) {
    os << "{opened_inodes=" << metric.opened_inodes << ", total_inodes="
       << metric.total_inodes << "}";
    return os;
  }
};
// Aggregate read I/O op count and total bytes read.
struct ReadIoSizesMetric {
  uint64_t total_ops = 0;
  uint64_t total_size = 0;
  bool updated = false;
  DENC(ReadIoSizesMetric, v, p) {
    DENC_START(1, 1, p);
    denc(v.total_ops, p);
    denc(v.total_size, p);
    denc(v.updated, p);
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const {
    f->dump_unsigned("total_ops", total_ops);
    f->dump_unsigned("total_size", total_size);
  }
  friend std::ostream& operator<<(std::ostream& os, const ReadIoSizesMetric &metric) {
    os << "{total_ops=" << metric.total_ops << ", total_size=" << metric.total_size <<"}";
    return os;
  }
};
// Aggregate write I/O op count and total bytes written.
struct WriteIoSizesMetric {
  uint64_t total_ops = 0;
  uint64_t total_size = 0;
  bool updated = false;
  DENC(WriteIoSizesMetric, v, p) {
    DENC_START(1, 1, p);
    denc(v.total_ops, p);
    denc(v.total_size, p);
    denc(v.updated, p);
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const {
    f->dump_unsigned("total_ops", total_ops);
    f->dump_unsigned("total_size", total_size);
  }
  friend std::ostream& operator<<(std::ostream& os, const WriteIoSizesMetric &metric) {
    os << "{total_ops=" << metric.total_ops << ", total_size=" << metric.total_size <<"}";
    return os;
  }
};
WRITE_CLASS_DENC(CapHitMetric)
WRITE_CLASS_DENC(ReadLatencyMetric)
WRITE_CLASS_DENC(WriteLatencyMetric)
WRITE_CLASS_DENC(MetadataLatencyMetric)
WRITE_CLASS_DENC(DentryLeaseHitMetric)
WRITE_CLASS_DENC(OpenedFilesMetric)
WRITE_CLASS_DENC(PinnedIcapsMetric)
WRITE_CLASS_DENC(OpenedInodesMetric)
WRITE_CLASS_DENC(ReadIoSizesMetric)
WRITE_CLASS_DENC(WriteIoSizesMetric)
// metrics that are forwarded to the MDS by client(s).
struct Metrics {
  // metrics
  CapHitMetric cap_hit_metric;
  ReadLatencyMetric read_latency_metric;
  WriteLatencyMetric write_latency_metric;
  MetadataLatencyMetric metadata_latency_metric;
  DentryLeaseHitMetric dentry_lease_metric;
  OpenedFilesMetric opened_files_metric;
  PinnedIcapsMetric pinned_icaps_metric;
  OpenedInodesMetric opened_inodes_metric;
  ReadIoSizesMetric read_io_sizes_metric;
  WriteIoSizesMetric write_io_sizes_metric;
  // metric update type (UPDATE_TYPE_REFRESH or UPDATE_TYPE_REMOVE)
  uint32_t update_type = UpdateType::UPDATE_TYPE_REFRESH;
  // Versioned encoding: dentry-lease added in v2, open/pinned counters in v3,
  // I/O size counters in v4.
  DENC(Metrics, v, p) {
    DENC_START(4, 1, p);
    denc(v.update_type, p);
    denc(v.cap_hit_metric, p);
    denc(v.read_latency_metric, p);
    denc(v.write_latency_metric, p);
    denc(v.metadata_latency_metric, p);
    if (struct_v >= 2) {
      denc(v.dentry_lease_metric, p);
    }
    if (struct_v >= 3) {
      denc(v.opened_files_metric, p);
      denc(v.pinned_icaps_metric, p);
      denc(v.opened_inodes_metric, p);
    }
    if (struct_v >= 4) {
      denc(v.read_io_sizes_metric, p);
      denc(v.write_io_sizes_metric, p);
    }
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const {
    f->dump_int("update_type", static_cast<uint32_t>(update_type));
    f->dump_object("cap_hit_metric", cap_hit_metric);
    f->dump_object("read_latency_metric", read_latency_metric);
    f->dump_object("write_latency_metric", write_latency_metric);
    f->dump_object("metadata_latency_metric", metadata_latency_metric);
    f->dump_object("dentry_lease_metric", dentry_lease_metric);
    f->dump_object("opened_files_metric", opened_files_metric);
    f->dump_object("pinned_icaps_metric", pinned_icaps_metric);
    f->dump_object("opened_inodes_metric", opened_inodes_metric);
    f->dump_object("read_io_sizes_metric", read_io_sizes_metric);
    f->dump_object("write_io_sizes_metric", write_io_sizes_metric);
  }
  friend std::ostream& operator<<(std::ostream& os, const Metrics& metrics) {
    os << "[update_type=" << metrics.update_type << ", metrics={"
       << "cap_hit_metric=" << metrics.cap_hit_metric
       << ", read_latency=" << metrics.read_latency_metric
       << ", write_latency=" << metrics.write_latency_metric
       << ", metadata_latency=" << metrics.metadata_latency_metric
       << ", dentry_lease=" << metrics.dentry_lease_metric
       << ", opened_files_metric=" << metrics.opened_files_metric
       << ", pinned_icaps_metric=" << metrics.pinned_icaps_metric
       << ", opened_inodes_metric=" << metrics.opened_inodes_metric
       << ", read_io_sizes_metric=" << metrics.read_io_sizes_metric
       << ", write_io_sizes_metric=" << metrics.write_io_sizes_metric
       << "}]";
    return os;
  }
};
WRITE_CLASS_DENC(Metrics)
// Batch of per-client metrics forwarded by MDS rank `rank`, sequenced by `seq`.
struct metrics_message_t {
  version_t seq = 0;
  mds_rank_t rank = MDS_RANK_NONE;
  std::map<entity_inst_t, Metrics> client_metrics_map;
  metrics_message_t() {
  }
  metrics_message_t(version_t seq, mds_rank_t rank)
    : seq(seq), rank(rank) {
  }
  void encode(bufferlist &bl, uint64_t features) const {
    using ceph::encode;
    ENCODE_START(1, 1, bl);
    encode(seq, bl);
    encode(rank, bl);
    // entity_inst_t encoding is feature-dependent, hence `features` here.
    encode(client_metrics_map, bl, features);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator &iter) {
    using ceph::decode;
    DECODE_START(1, iter);
    decode(seq, iter);
    decode(rank, iter);
    decode(client_metrics_map, iter);
    DECODE_FINISH(iter);
  }
  void dump(Formatter *f) const {
    f->dump_unsigned("seq", seq);
    f->dump_int("rank", rank);
    for (auto &[client, metrics] : client_metrics_map) {
      f->dump_object("client", client);
      f->dump_object("metrics", metrics);
    }
  }
  friend std::ostream& operator<<(std::ostream& os, const metrics_message_t &metrics_message) {
    os << "[sequence=" << metrics_message.seq << ", rank=" << metrics_message.rank
       << ", metrics=" << metrics_message.client_metrics_map << "]";
    return os;
  }
};
WRITE_CLASS_ENCODER_FEATURES(metrics_message_t)
#endif // CEPH_MDS_PERF_METRIC_TYPES_H
| 11,492 | 26.429594 | 95 | h |
null | ceph-main/src/mds/MDSPinger.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_MDS_PINGER_H
#define CEPH_MDS_PINGER_H
#include <map>
#include "include/types.h"
#include "msg/msg_types.h"
#include "common/ceph_mutex.h"
#include "common/ceph_time.h"
#include "messages/MMDSPing.h"
#include "mdstypes.h"
class MDSRank;
// Tracks ping/pong exchanges between MDS ranks to detect lagging peers.
class MDSPinger {
public:
  MDSPinger(MDSRank *mds);
  // send a ping message to an mds rank. initialize ping state if
  // required.
  void send_ping(mds_rank_t rank, const entity_addrvec_t &addr);
  // check if a pong response is valid. a pong reponse from an
  // mds is valid if at least one ping message was sent to the
  // mds and the sequence number in the pong is outstanding.
  bool pong_received(mds_rank_t rank, version_t seq);
  // reset the ping state for a given rank
  void reset_ping(mds_rank_t rank);
  // check if a rank is lagging (based on pong response) responding
  // to a ping message.
  bool is_rank_lagging(mds_rank_t rank);
private:
  using clock = ceph::coarse_mono_clock;
  using time = ceph::coarse_mono_time;
  // Initial Sequence Number (ISN) of the first ping message sent
  // by rank 0 to other active ranks (incuding itself).
  static constexpr uint64_t MDS_PINGER_ISN = 1;
  // Per-rank ping bookkeeping: last sequence issued, send time of each
  // outstanding sequence, and when the last pong was accepted.
  struct PingState {
    version_t last_seq = MDS_PINGER_ISN;
    std::map<version_t, time> seq_time_map;
    time last_acked_time = clock::now();
  };
  MDSRank *mds;
  // drop this lock when calling ->send_message_mds() else mds might
  // deadlock
  ceph::mutex lock = ceph::make_mutex("MDSPinger::lock");
  std::map<mds_rank_t, PingState> ping_state_by_rank;
};
#endif // CEPH_MDS_PINGER_H
| 1,669 | 25.935484 | 70 | h |
null | ceph-main/src/mds/MDSRank.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef MDS_RANK_H_
#define MDS_RANK_H_
#include <string_view>
#include <boost/asio/io_context.hpp>
#include "common/DecayCounter.h"
#include "common/LogClient.h"
#include "common/Timer.h"
#include "common/fair_mutex.h"
#include "common/TrackedOp.h"
#include "common/ceph_mutex.h"
#include "include/common_fwd.h"
#include "messages/MClientRequest.h"
#include "messages/MCommand.h"
#include "messages/MMDSMap.h"
#include "Beacon.h"
#include "DamageTable.h"
#include "MDSMap.h"
#include "SessionMap.h"
#include "MDCache.h"
#include "MDLog.h"
#include "MDSContext.h"
#include "PurgeQueue.h"
#include "Server.h"
#include "MetricsHandler.h"
#include "osdc/Journaler.h"
// Full .h import instead of forward declaration for PerfCounter, for the
// benefit of those including this header and using MDSRank::logger
#include "common/perf_counters.h"
// MDS perf-counter indices (registered with PerfCounters; see MDSRank::logger).
enum {
  l_mds_first = 2000,
  l_mds_request,
  l_mds_reply,
  l_mds_reply_latency,
  l_mds_slow_reply,
  l_mds_forward,
  l_mds_dir_fetch_complete,
  l_mds_dir_fetch_keys,
  l_mds_dir_commit,
  l_mds_dir_split,
  l_mds_dir_merge,
  l_mds_inodes,
  l_mds_inodes_top,
  l_mds_inodes_bottom,
  l_mds_inodes_pin_tail,
  l_mds_inodes_pinned,
  l_mds_inodes_expired,
  l_mds_inodes_with_caps,
  l_mds_caps,
  l_mds_subtrees,
  l_mds_traverse,
  l_mds_traverse_hit,
  l_mds_traverse_forward,
  l_mds_traverse_discover,
  l_mds_traverse_dir_fetch,
  l_mds_traverse_remote_ino,
  l_mds_traverse_lock,
  l_mds_load_cent,
  l_mds_dispatch_queue_len,
  l_mds_exported,
  l_mds_exported_inodes,
  l_mds_imported,
  l_mds_imported_inodes,
  l_mds_openino_dir_fetch,
  l_mds_openino_backtrace_fetch,
  l_mds_openino_peer_discover,
  l_mds_root_rfiles,
  l_mds_root_rbytes,
  l_mds_root_rsnaps,
  l_mds_scrub_backtrace_fetch,
  l_mds_scrub_set_tag,
  l_mds_scrub_backtrace_repaired,
  l_mds_scrub_inotable_repaired,
  l_mds_scrub_dir_inodes,
  l_mds_scrub_dir_base_inodes,
  l_mds_scrub_dirfrag_rstats,
  l_mds_scrub_file_inodes,
  l_mdss_handle_inode_file_caps,
  l_mdss_ceph_cap_op_revoke,
  l_mdss_ceph_cap_op_grant,
  l_mdss_ceph_cap_op_trunc,
  l_mdss_ceph_cap_op_flushsnap_ack,
  l_mdss_ceph_cap_op_flush_ack,
  l_mdss_handle_client_caps,
  l_mdss_handle_client_caps_dirty,
  l_mdss_handle_client_cap_release,
  l_mdss_process_request_cap_release,
  l_mds_last,
};
// memory utilization
// Memory-usage perf-counter indices (inode/dir/dentry/cap object counts, RSS, heap).
enum {
  l_mdm_first = 2500,
  l_mdm_ino,
  l_mdm_inoa,
  l_mdm_inos,
  l_mdm_dir,
  l_mdm_dira,
  l_mdm_dirs,
  l_mdm_dn,
  l_mdm_dna,
  l_mdm_dns,
  l_mdm_cap,
  l_mdm_capa,
  l_mdm_caps,
  l_mdm_rss,
  l_mdm_heap,
  l_mdm_last,
};
namespace ceph {
struct heartbeat_handle_d;
}
class Locker;
class MDCache;
class MDLog;
class MDBalancer;
class InoTable;
class SnapServer;
class SnapClient;
class MDSTableServer;
class MDSTableClient;
class Messenger;
class MetricAggregator;
class Objecter;
class MonClient;
class MgrClient;
class Finisher;
class ScrubStack;
class C_ExecAndReply;
// In-flight metadata request bookkeeping: op code, optional dentry (held with
// a PIN_PURGING reference for the lifetime of this object), and transaction id.
struct MDSMetaRequest {
private:
  int _op;
  CDentry *_dentry;
  ceph_tid_t _tid;
public:
  explicit MDSMetaRequest(int op, CDentry *dn, ceph_tid_t tid) :
    _op(op), _dentry(dn), _tid(tid) {
    if (_dentry) {
      _dentry->get(CDentry::PIN_PURGING);
    }
  }
  // Rule of five: the destructor drops the pin exactly once, so an implicit
  // copy (or move leaving the source non-null) would double-put PIN_PURGING.
  MDSMetaRequest(const MDSMetaRequest &) = delete;
  MDSMetaRequest &operator=(const MDSMetaRequest &) = delete;
  ~MDSMetaRequest() {
    if (_dentry) {
      _dentry->put(CDentry::PIN_PURGING);
    }
  }
  CDentry *get_dentry() { return _dentry; }
  int get_op() { return _op; }
  ceph_tid_t get_tid() { return _tid; }
};
/**
* The public part of this class's interface is what's exposed to all
* the various subsystems (server, mdcache, etc), such as pointers
* to the other subsystems, and message-sending calls.
*/
/**
 * MDSRank: the core of a single MDS rank's state and state machine.
 *
 * Subsystems (Server, MDCache, Locker, ...) hold a pointer to this object
 * and use it for cross-subsystem access, message sending, and waiter
 * queueing.  Lifetime control (suicide/respawn) calls back up into the
 * owning MDSDaemon via the hooks passed to the constructor.
 */
class MDSRank {
public:
  // Completion helpers that need access to protected/private rank state.
  friend class C_Flush_Journal;
  friend class C_Drop_Cache;
  friend class C_CacheDropExecAndReply;
  friend class C_ScrubExecAndReply;
  friend class C_ScrubControlExecAndReply;

  CephContext *cct;

  MDSRank(
      mds_rank_t whoami_,
      ceph::fair_mutex &mds_lock_,
      LogChannelRef &clog_,
      CommonSafeTimer<ceph::fair_mutex> &timer_,
      Beacon &beacon_,
      std::unique_ptr<MDSMap> & mdsmap_,
      Messenger *msgr,
      MonClient *monc_,
      MgrClient *mgrc,
      Context *respawn_hook_,
      Context *suicide_hook_,
      boost::asio::io_context& ioc);

  mds_rank_t get_nodeid() const { return whoami; }
  int64_t get_metadata_pool() const
  {
    return metadata_pool;
  }

  mono_time get_starttime() const {
    return starttime;
  }
  // Uptime of this rank, measured on the monotonic clock.
  std::chrono::duration<double> get_uptime() const {
    mono_time now = mono_clock::now();
    return std::chrono::duration<double>(now-starttime);
  }

  bool is_daemon_stopping() const;

  MDSTableClient *get_table_client(int t);
  MDSTableServer *get_table_server(int t);

  // Look up a client session by client id; returns nullptr-style result
  // semantics of SessionMap::get_session.
  Session *get_session(client_t client) {
    return sessionmap.get_session(entity_name_t::CLIENT(client.v));
  }
  Session *get_session(const cref_t<Message> &m);

  MDSMap::DaemonState get_state() const { return state; }
  MDSMap::DaemonState get_want_state() const { return beacon.get_want_state(); }

  // Predicates over the state assigned to this rank by the MDSMap.
  bool is_creating() const { return state == MDSMap::STATE_CREATING; }
  bool is_starting() const { return state == MDSMap::STATE_STARTING; }
  bool is_standby() const { return state == MDSMap::STATE_STANDBY; }
  bool is_replay() const { return state == MDSMap::STATE_REPLAY; }
  bool is_standby_replay() const { return state == MDSMap::STATE_STANDBY_REPLAY; }
  bool is_resolve() const { return state == MDSMap::STATE_RESOLVE; }
  bool is_reconnect() const { return state == MDSMap::STATE_RECONNECT; }
  bool is_rejoin() const { return state == MDSMap::STATE_REJOIN; }
  bool is_clientreplay() const { return state == MDSMap::STATE_CLIENTREPLAY; }
  bool is_active() const { return state == MDSMap::STATE_ACTIVE; }
  bool is_stopping() const { return state == MDSMap::STATE_STOPPING; }
  bool is_any_replay() const { return (is_replay() || is_standby_replay()); }
  bool is_stopped() const { return mdsmap->is_stopped(whoami); }
  bool is_cluster_degraded() const { return cluster_degraded; }
  bool allows_multimds_snaps() const { return mdsmap->allows_multimds_snaps(); }

  // States in which the cache may be trimmed.
  bool is_cache_trimmable() const {
    return is_standby_replay() || is_clientreplay() || is_active() || is_stopping();
  }

  void handle_write_error(int err);
  void handle_write_error_with_lock(int err);

  void update_mlogger();

  // Queue a context on the finished queue to be run by the progress
  // thread (FIFO), then wake that thread.
  void queue_waiter(MDSContext *c) {
    finished_queue.push_back(c);
    progress_thread.signal();
  }
  // Like queue_waiter(), but runs ahead of already-queued contexts.
  void queue_waiter_front(MDSContext *c) {
    finished_queue.push_front(c);
    progress_thread.signal();
  }
  // Move an entire vector of contexts onto the back of the finished
  // queue; `ls` is emptied (swapped out) in the process.
  void queue_waiters(MDSContext::vec& ls) {
    MDSContext::vec v;
    v.swap(ls);
    std::copy(v.begin(), v.end(), std::back_inserter(finished_queue));
    progress_thread.signal();
  }
  // Front variant: reverse-copy so relative order within `ls` is kept
  // once the contexts are pushed to the front of the queue.
  void queue_waiters_front(MDSContext::vec& ls) {
    MDSContext::vec v;
    v.swap(ls);
    std::copy(v.rbegin(), v.rend(), std::front_inserter(finished_queue));
    progress_thread.signal();
  }

  // Daemon lifetime functions: these guys break the abstraction
  // and call up into the parent MDSDaemon instance. It's kind
  // of unavoidable: if we want any depth into our calls
  // to be able to e.g. tear down the whole process, we have to
  // have a reference going all the way down.
  // >>>
  void suicide();
  void respawn();
  // <<<

  /**
   * Call this periodically if inside a potentially long running piece
   * of code while holding the mds_lock
   */
  void heartbeat_reset();
  // Scale the configured heartbeat grace by `count` (see
  // _heartbeat_reset_grace).
  int heartbeat_reset_grace(int count=1) {
    return count * _heartbeat_reset_grace;
  }

  /**
   * Report state DAMAGED to the mon, and then pass on to respawn(). Call
   * this when an unrecoverable error is encountered while attempting
   * to load an MDS rank's data structures. This is *not* for use with
   * errors affecting normal dirfrag/inode objects -- they should be handled
   * through cleaner scrub/repair mechanisms.
   *
   * Callers must already hold mds_lock.
   */
  void damaged();

  /**
   * Wrapper around `damaged` for users who are not
   * already holding mds_lock.
   *
   * Callers must not already hold mds_lock.
   */
  void damaged_unlocked();

  double last_cleared_laggy() const {
    return beacon.last_cleared_laggy();
  }

  double get_dispatch_queue_max_age(utime_t now) const;

  // Message sending helpers for peers, clients and raw connections.
  void send_message_mds(const ref_t<Message>& m, mds_rank_t mds);
  void send_message_mds(const ref_t<Message>& m, const entity_addrvec_t &addr);
  void forward_message_mds(const cref_t<MClientRequest>& req, mds_rank_t mds);
  void send_message_client_counted(const ref_t<Message>& m, client_t client);
  void send_message_client_counted(const ref_t<Message>& m, Session* session);
  void send_message_client_counted(const ref_t<Message>& m, const ConnectionRef& connection);
  void send_message_client(const ref_t<Message>& m, Session* session);
  void send_message(const ref_t<Message>& m, const ConnectionRef& c);

  // Waiter registration: contexts queued here are completed when the
  // corresponding event happens (peer state change, map epoch, etc.).
  void wait_for_bootstrapped_peer(mds_rank_t who, MDSContext *c) {
    waiting_for_bootstrapping_peer[who].push_back(c);
  }
  void wait_for_active_peer(mds_rank_t who, MDSContext *c) {
    waiting_for_active_peer[who].push_back(c);
  }
  // MDS_RANK_NONE slot is used as the "whole cluster recovered" bucket.
  void wait_for_cluster_recovered(MDSContext *c) {
    ceph_assert(cluster_degraded);
    waiting_for_active_peer[MDS_RANK_NONE].push_back(c);
  }

  void wait_for_any_client_connection(MDSContext *c) {
    waiting_for_any_client_connection.push_back(c);
  }
  void kick_waiters_for_any_client_connection(void) {
    finish_contexts(g_ceph_context, waiting_for_any_client_connection);
  }
  void wait_for_active(MDSContext *c) {
    waiting_for_active.push_back(c);
  }
  void wait_for_replay(MDSContext *c) {
    waiting_for_replay.push_back(c);
  }
  void wait_for_rejoin(MDSContext *c) {
    waiting_for_rejoin.push_back(c);
  }
  void wait_for_reconnect(MDSContext *c) {
    waiting_for_reconnect.push_back(c);
  }
  void wait_for_resolve(MDSContext *c) {
    waiting_for_resolve.push_back(c);
  }
  void wait_for_mdsmap(epoch_t e, MDSContext *c) {
    waiting_for_mdsmap[e].push_back(c);
  }
  void enqueue_replay(MDSContext *c) {
    replay_queue.push_back(c);
  }

  bool queue_one_replay();
  void maybe_clientreplay_done();

  void set_osd_epoch_barrier(epoch_t e);
  epoch_t get_osd_epoch_barrier() const {return osd_epoch_barrier;}
  epoch_t get_osd_epoch() const;

  // Allocate a new tid for an mds-initiated request.
  ceph_tid_t issue_tid() { return ++last_tid; }

  MDSMap *get_mds_map() { return mdsmap.get(); }

  uint64_t get_num_requests() const { return logger->get(l_mds_request); }

  int get_mds_slow_req_count() const { return mds_slow_req_count; }

  void dump_status(Formatter *f) const;

  void hit_export_target(mds_rank_t rank, double amount=-1.0);
  // True if `rank` is among this rank's export targets in the MDSMap.
  bool is_export_target(mds_rank_t rank) {
    const std::set<mds_rank_t>& map_targets = mdsmap->get_mds_info(get_nodeid()).export_targets;
    return map_targets.count(rank);
  }

  bool evict_client(int64_t session_id, bool wait, bool blocklist,
                    std::ostream& ss, Context *on_killed=nullptr);
  int config_client(int64_t session_id, bool remove,
                    const std::string& option, const std::string& value,
                    std::ostream& ss);
  void schedule_inmemory_logger();

  double get_inject_journal_corrupt_dentry_first() const {
    return inject_journal_corrupt_dentry_first;
  }

  // Reference to global MDS::mds_lock, so that users of MDSRank don't
  // carry around references to the outer MDS, and we can substitute
  // a separate lock here in future potentially.
  ceph::fair_mutex &mds_lock;
  // Reference to global cluster log client, just to avoid initialising
  // a separate one here.
  LogChannelRef &clog;
  // Reference to global timer utility, because MDSRank and MDSDaemon
  // currently both use the same mds_lock, so it makes sense for them
  // to share a timer.
  CommonSafeTimer<ceph::fair_mutex> &timer;

  std::unique_ptr<MDSMap> &mdsmap; /* MDSDaemon::mdsmap */

  Objecter *objecter;

  // sub systems
  Server *server = nullptr;
  MDCache *mdcache = nullptr;
  Locker *locker = nullptr;
  MDLog *mdlog = nullptr;
  MDBalancer *balancer = nullptr;
  ScrubStack *scrubstack = nullptr;
  DamageTable damage_table;

  InoTable *inotable = nullptr;

  SnapServer *snapserver = nullptr;
  SnapClient *snapclient = nullptr;

  SessionMap sessionmap;

  PerfCounters *logger = nullptr, *mlogger = nullptr;
  OpTracker op_tracker;

  // mds-initiated requests currently in flight, keyed by tid.
  std::map<ceph_tid_t, MDSMetaRequest> internal_client_requests;

  // The last different state I held before current
  MDSMap::DaemonState last_state = MDSMap::STATE_BOOT;
  // The state assigned to me by the MDSMap
  MDSMap::DaemonState state = MDSMap::STATE_STANDBY;

  bool cluster_degraded = false;

  Finisher *finisher;

protected:
  // Phases of boot_start() (starting/replay path).
  typedef enum {
    // The MDSMap is available, configure default layouts and structures
    MDS_BOOT_INITIAL = 0,
    // We are ready to open some inodes
    MDS_BOOT_OPEN_ROOT,
    // We are ready to do a replay if needed
    MDS_BOOT_PREPARE_LOG,
    // Replay is complete
    MDS_BOOT_REPLAY_DONE
  } BootStep;

  // Thread that drains finished_queue; woken via signal().
  class ProgressThread : public Thread {
  public:
    explicit ProgressThread(MDSRank *mds_) : mds(mds_) {}
    void * entry() override;
    void shutdown();
    void signal() {cond.notify_all();}
  private:
    MDSRank *mds;
    std::condition_variable_any cond;
  } progress_thread;

  class C_MDS_StandbyReplayRestart;
  class C_MDS_StandbyReplayRestartFinish;
  // Friended to access retry_dispatch
  friend class C_MDS_RetryMessage;
  friend class C_MDS_BootStart;
  friend class C_MDS_InternalBootStart;
  friend class C_MDS_MonCommand;

  const mds_rank_t whoami;

  ~MDSRank();

  void inc_dispatch_depth() { ++dispatch_depth; }
  void dec_dispatch_depth() { --dispatch_depth; }
  void retry_dispatch(const cref_t<Message> &m);
  bool is_valid_message(const cref_t<Message> &m);
  void handle_message(const cref_t<Message> &m);
  void _advance_queues();
  bool _dispatch(const cref_t<Message> &m, bool new_msg);
  bool is_stale_message(const cref_t<Message> &m) const;

  /**
   * Emit clog warnings for any ops reported as warnings by optracker
   */
  void check_ops_in_flight();

  // Set up the rank's perf counters (logger/mlogger).
  // NOTE(review): the historical comment here read "Share MDSMap with
  // clients", which does not match create_logger() -- confirm intent.
  void create_logger();

  void dump_clientreplay_status(Formatter *f) const;
  // Admin-socket / tell command implementations.
  void command_scrub_start(Formatter *f,
                           std::string_view path, std::string_view tag,
                           const std::vector<std::string>& scrubop_vec, Context *on_finish);
  void command_tag_path(Formatter *f, std::string_view path,
                        std::string_view tag);
  // scrub control commands
  void command_scrub_abort(Formatter *f, Context *on_finish);
  void command_scrub_pause(Formatter *f, Context *on_finish);
  void command_scrub_resume(Formatter *f);
  void command_scrub_status(Formatter *f);

  void command_flush_path(Formatter *f, std::string_view path);
  void command_flush_journal(Formatter *f);
  void command_get_subtrees(Formatter *f);
  void command_export_dir(Formatter *f,
      std::string_view path, mds_rank_t dest);
  bool command_dirfrag_split(
      cmdmap_t cmdmap,
      std::ostream &ss);
  bool command_dirfrag_merge(
      cmdmap_t cmdmap,
      std::ostream &ss);
  bool command_dirfrag_ls(
      cmdmap_t cmdmap,
      std::ostream &ss,
      Formatter *f);
  int _command_export_dir(std::string_view path, mds_rank_t dest);
  CDir *_command_dirfrag_get(
      const cmdmap_t &cmdmap,
      std::ostream &ss);
  void command_openfiles_ls(Formatter *f);
  void command_dump_tree(const cmdmap_t &cmdmap, std::ostream &ss, Formatter *f);
  void command_dump_inode(Formatter *f, const cmdmap_t &cmdmap, std::ostream &ss);
  void command_dump_dir(Formatter *f, const cmdmap_t &cmdmap, std::ostream &ss);
  void command_cache_drop(uint64_t timeout, Formatter *f, Context *on_finish);

  // FIXME the state machine logic should be separable from the dispatch
  // logic that calls it.
  // >>>
  void calc_recovery_set();
  void request_state(MDSMap::DaemonState s);

  void boot_create(); // i am new mds.
  void boot_start(BootStep step=MDS_BOOT_INITIAL, int r=0); // starting|replay

  // State-transition entry/exit handlers, one pair per MDSMap state.
  void replay_start();
  void creating_done();
  void starting_done();
  void replay_done();
  void standby_replay_restart();
  void _standby_replay_restart_finish(int r, uint64_t old_read_pos);

  void reopen_log();

  void resolve_start();
  void resolve_done();
  void reconnect_start();
  void reconnect_done();
  void rejoin_joint_start();
  void rejoin_start();
  void rejoin_done();
  void recovery_done(int oldstate);
  void clientreplay_start();
  void clientreplay_done();
  void active_start();
  void stopping_start();
  void stopping_done();

  void validate_sessions();

  void handle_mds_recovery(mds_rank_t who);
  void handle_mds_failure(mds_rank_t who);

  /* Update MDSMap export_targets for this rank. Called on ::tick(). */
  void update_targets();

  void _mon_command_finish(int r, std::string_view cmd, std::string_view outs);
  void set_mdsmap_multimds_snaps_allowed();

  Context *create_async_exec_context(C_ExecAndReply *ctx);

  // blocklist the provided addrs and set OSD epoch barrier
  // with the provided epoch.
  void apply_blocklist(const std::set<entity_addr_t> &addrs, epoch_t epoch);

  void reset_event_flags();

  // Incarnation as seen in MDSMap at the point where a rank is
  // assigned.
  int incarnation = 0;

  // Flag to indicate we entered shutdown: anyone seeing this to be true
  // after taking mds_lock must drop out.
  bool stopping = false;

  // PurgeQueue is only used by StrayManager, but it is owned by MDSRank
  // because its init/shutdown happens at the top level.
  PurgeQueue purge_queue;

  MetricsHandler metrics_handler;
  std::unique_ptr<MetricAggregator> metric_aggregator;

  // Messages deferred while the daemon is laggy.
  std::list<cref_t<Message>> waiting_for_nolaggy;
  MDSContext::que finished_queue;
  // Dispatch, retry, queues
  int dispatch_depth = 0;

  ceph::heartbeat_handle_d *hb = nullptr; // Heartbeat for threads using mds_lock
  double heartbeat_grace;
  int _heartbeat_reset_grace;

  std::map<mds_rank_t, version_t> peer_mdsmap_epoch;

  ceph_tid_t last_tid = 0; // for mds-initiated requests (e.g. stray rename)

  // Waiter lists drained on the matching state transitions (see
  // wait_for_* accessors above).
  MDSContext::vec waiting_for_active, waiting_for_replay, waiting_for_rejoin,
                  waiting_for_reconnect, waiting_for_resolve;
  MDSContext::vec waiting_for_any_client_connection;
  MDSContext::que replay_queue;
  bool replaying_requests_done = false;

  std::map<mds_rank_t, MDSContext::vec> waiting_for_active_peer;
  std::map<mds_rank_t, MDSContext::vec> waiting_for_bootstrapping_peer;
  std::map<epoch_t, MDSContext::vec> waiting_for_mdsmap;

  epoch_t osd_epoch_barrier = 0;

  // Const reference to the beacon so that we can behave differently
  // when it's laggy.
  Beacon &beacon;

  int mds_slow_req_count = 0;

  std::map<mds_rank_t,DecayCounter> export_targets; /* targets this MDS is exporting to or wants/tries to */

  Messenger *messenger;
  MonClient *monc;
  MgrClient *mgrc;

  // Callbacks into the owning MDSDaemon; see "Daemon lifetime" above.
  Context *respawn_hook;
  Context *suicide_hook;

  bool standby_replaying = false; // true if current replay pass is in standby-replay mode
  uint64_t extraordinary_events_dump_interval = 0;
  double inject_journal_corrupt_dentry_first = 0.0;

private:
  bool send_status = true;

  // The metadata pool won't change in the whole life time of the fs,
  // with this we can get rid of the mds_lock in many places too.
  int64_t metadata_pool = -1;

  // "task" string that gets displayed in ceph status
  inline static const std::string SCRUB_STATUS_KEY = "scrub status";

  bool client_eviction_dump = false;

  void get_task_status(std::map<std::string, std::string> *status);
  void schedule_update_timer_task();
  void send_task_status();

  void inmemory_logger();
  bool is_rank0() const {
    return whoami == (mds_rank_t)0;
  }

  mono_time starttime = mono_clock::zero();
  boost::asio::io_context& ioc;
};
/**
 * Completion that re-injects a message into MDSRank's dispatch path.
 *
 * Used when a message cannot be processed yet (e.g. needed state is not
 * loaded): finishing this context calls MDSRank::retry_dispatch() with
 * the saved message.  The completion result `r` is ignored.
 */
class C_MDS_RetryMessage : public MDSInternalContext {
public:
  C_MDS_RetryMessage(MDSRank *mds, const cref_t<Message> &m)
    : MDSInternalContext(mds), m(m) {}
  void finish(int r) override {
    get_mds()->retry_dispatch(m);
  }
protected:
  // The message to re-dispatch when this context fires.
  cref_t<Message> m;
};
/**
 * Context factory that produces C_MDS_RetryMessage instances for a fixed
 * (mds, message) pair.  Lets callers that accept an MDSContextFactory
 * create a fresh retry context each time one is needed.
 */
class CF_MDS_RetryMessageFactory : public MDSContextFactory {
public:
  CF_MDS_RetryMessageFactory(MDSRank *mds, const cref_t<Message> &m)
    : mds(mds), m(m) {}

  // Build a retry context for the captured message; caller takes
  // ownership.  Marked override: this implements the factory's virtual
  // build() (previously missing, hiding signature-mismatch errors).
  MDSContext *build() override {
    return new C_MDS_RetryMessage(mds, m);
  }

private:
  MDSRank *mds;     // rank the retry is dispatched on (not owned)
  cref_t<Message> m; // message to re-dispatch
};
/**
* The aspect of MDSRank exposed to MDSDaemon but not subsystems: i.e.
* the service/dispatcher stuff like init/shutdown that subsystems should
* never touch.
*/
class MDSRankDispatcher : public MDSRank, public md_config_obs_t
{
public:
  MDSRankDispatcher(
    mds_rank_t whoami_,
    ceph::fair_mutex &mds_lock_,
    LogChannelRef &clog_,
    CommonSafeTimer<ceph::fair_mutex> &timer_,
    Beacon &beacon_,
    std::unique_ptr<MDSMap> &mdsmap_,
    Messenger *msgr,
    MonClient *monc_,
    MgrClient *mgrc,
    Context *respawn_hook_,
    Context *suicide_hook_,
    boost::asio::io_context& ioc);

  // Service lifecycle entry points used by the owning MDSDaemon.
  void init();
  void tick();
  void shutdown();

  // Handle an admin-socket command; result is delivered via on_finish
  // as (return code, status string, output buffer).
  void handle_asok_command(
    std::string_view command,
    const cmdmap_t& cmdmap,
    Formatter *f,
    const bufferlist &inbl,
    std::function<void(int,const std::string&,bufferlist&)> on_finish);
  void handle_mds_map(const cref_t<MMDSMap> &m, const MDSMap &oldmap);
  void handle_osd_map();
  void update_log_config();

  // md_config_obs_t interface: configuration observation hooks.
  const char** get_tracked_conf_keys() const override final;
  void handle_conf_change(const ConfigProxy& conf, const std::set<std::string>& changed) override;

  void dump_sessions(const SessionFilter &filter, Formatter *f, bool cap_dump=false) const;
  void evict_clients(const SessionFilter &filter,
                     std::function<void(int,const std::string&,bufferlist&)> on_finish);

  // Call into me from MDS::ms_dispatch
  bool ms_dispatch(const cref_t<Message> &m);
};
#endif // MDS_RANK_H_
| 23,037 | 29.799465 | 110 | h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDSTABLE_H
#define CEPH_MDSTABLE_H
#include "mdstypes.h"
#include "mds_table_types.h"
#include "include/buffer_fwd.h"
#include "MDSContext.h"
class MDSRank;
/**
 * MDSTable: base class for persistent MDS tables (e.g. InoTable,
 * SnapServer state).  Provides versioning, load/save to a RADOS object,
 * and a simple UNDEF/OPENING/ACTIVE state model; subclasses supply the
 * table payload via reset_state()/encode_state()/decode_state().
 */
class MDSTable {
public:
  friend class C_IO_MT_Load;
  friend class C_IO_MT_Save;

  /**
   * @param m owning rank (not owned)
   * @param n table name, used to derive the backing object name
   * @param is_per_mds true if each rank has its own copy of this table
   */
  MDSTable(MDSRank *m, std::string_view n, bool is_per_mds) :
    mds(m), table_name(n), per_mds(is_per_mds) {}
  virtual ~MDSTable() {}

  void set_rank(mds_rank_t r)
  {
    rank = r;
  }

  version_t get_version() const { return version; }
  version_t get_committed_version() const { return committed_version; }
  version_t get_committing_version() const { return committing_version; }
  version_t get_projected_version() const { return projected_version; }

  // Force both current and projected versions during journal replay.
  void force_replay_version(version_t v) {
    version = projected_version = v;
  }

  //version_t project_version() { return ++projected_version; }
  //version_t inc_version() { return ++version; }

  // load/save from disk (hack)
  bool is_undef() const { return state == STATE_UNDEF; }
  bool is_active() const { return state == STATE_ACTIVE; }
  bool is_opening() const { return state == STATE_OPENING; }

  void reset();
  // Persist the table; onfinish (may be null) fires once at least
  // version `need` has been committed.  (Was `=0`; use nullptr for
  // null-pointer constants.)
  void save(MDSContext *onfinish=nullptr, version_t need=0);
  void save_2(int r, version_t v);

  // Best-effort flush on shutdown; no completion is awaited.
  void shutdown() {
    if (is_active()) save(nullptr);
  }

  object_t get_object_name() const;

  void load(MDSContext *onfinish);
  void load_2(int, bufferlist&, Context *onfinish);

  // child must overload these
  virtual void reset_state() = 0;
  virtual void decode_state(bufferlist::const_iterator& p) = 0;
  virtual void encode_state(bufferlist& bl) const = 0;

  MDSRank *mds;

protected:
  static const int STATE_UNDEF = 0;
  static const int STATE_OPENING = 1;
  static const int STATE_ACTIVE = 2;
  //static const int STATE_COMMITTING = 3;

  std::string table_name;
  bool per_mds;
  mds_rank_t rank = MDS_RANK_NONE;

  int state = STATE_UNDEF;
  version_t version = 0, committing_version = 0, committed_version = 0, projected_version = 0;

  // Contexts to run once the keyed version has been saved.
  std::map<version_t, MDSContext::vec > waitfor_save;
};
#endif
| 2,483 | 26 | 94 | h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDSTABLECLIENT_H
#define CEPH_MDSTABLECLIENT_H
#include "include/types.h"
#include "MDSContext.h"
#include "mds_table_types.h"
#include "messages/MMDSTableRequest.h"
class MDSRank;
class LogSegment;
class MDSTableClient {
public:
MDSTableClient(MDSRank *m, int tab) :
mds(m), table(tab) {}
virtual ~MDSTableClient() {}
void handle_request(const cref_t<MMDSTableRequest> &m);
void _prepare(bufferlist& mutation, version_t *ptid, bufferlist *pbl, MDSContext *onfinish);
void commit(version_t tid, LogSegment *ls);
void resend_commits();
void resend_prepares();
// for recovery (by me)
void got_journaled_agree(version_t tid, LogSegment *ls);
void got_journaled_ack(version_t tid);
bool has_committed(version_t tid) const {
return pending_commit.count(tid) == 0;
}
void wait_for_ack(version_t tid, MDSContext *c) {
ack_waiters[tid].push_back(c);
}
std::set<version_t> get_journaled_tids() const {
std::set<version_t> tids;
for (auto p : pending_commit)
tids.insert(p.first);
return tids;
}
void handle_mds_failure(mds_rank_t mds);
bool is_server_ready(void) const {
return server_ready;
}
// child must implement
virtual void resend_queries() = 0;
virtual void handle_query_result(const cref_t<MMDSTableRequest> &m) = 0;
virtual void handle_notify_prep(const cref_t<MMDSTableRequest> &m) = 0;
virtual void notify_commit(version_t tid) = 0;
protected:
// prepares
struct _pending_prepare {
_pending_prepare() {}
_pending_prepare(MDSContext *c, version_t *pt, bufferlist *pb, bufferlist& m) :
onfinish(c), ptid(pt), pbl(pb), mutation(m) {}
MDSContext *onfinish = nullptr;
version_t *ptid = nullptr;
bufferlist *pbl = nullptr;
bufferlist mutation;
};
friend class C_LoggedAck;
void handle_reply(class MMDSTableQuery *m);
void _logged_ack(version_t tid);
MDSRank *mds;
int table;
uint64_t last_reqid = ~0ULL;
bool server_ready = false;
std::map<uint64_t, _pending_prepare> pending_prepare;
std::map<version_t, uint64_t> prepared_update;
std::list<_pending_prepare> waiting_for_reqid;
// pending commits
std::map<version_t, LogSegment*> pending_commit;
std::map<version_t, MDSContext::vec > ack_waiters;
};
#endif
| 2,713 | 25.096154 | 94 | h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDSTABLESERVER_H
#define CEPH_MDSTABLESERVER_H
#include "MDSTable.h"
#include "MDSContext.h"
#include "messages/MMDSTableRequest.h"
/**
 * MDSTableServer: server side of the two-phase MDS table protocol.
 * Persists its state via MDSTable and journals prepare/commit/rollback
 * transitions; subclasses implement the table-specific mutations.
 */
class MDSTableServer : public MDSTable {
public:
  friend class C_ServerRecovery;

  MDSTableServer(MDSRank *m, int tab) :
    MDSTable(m, get_mdstable_name(tab), false), table(tab) {}
  ~MDSTableServer() override {}

  virtual void handle_query(const cref_t<MMDSTableRequest> &m) = 0;
  virtual void _prepare(const bufferlist &bl, uint64_t reqid, mds_rank_t bymds, bufferlist& out) = 0;
  virtual void _get_reply_buffer(version_t tid, bufferlist *pbl) const = 0;
  virtual void _commit(version_t tid, cref_t<MMDSTableRequest> req) = 0;
  virtual void _rollback(version_t tid) = 0;
  virtual void _server_update(bufferlist& bl) { ceph_abort(); }
  virtual bool _notify_prep(version_t tid) { return false; }

  // Bump the version and record a pending prepare from (mds, reqid).
  // During replay the projected version tracks the real one.
  void _note_prepare(mds_rank_t mds, uint64_t reqid, bool replay=false) {
    version++;
    if (replay)
      projected_version = version;
    pending_for_mds[version].mds = mds;
    pending_for_mds[version].reqid = reqid;
    pending_for_mds[version].tid = version;
  }
  void _note_commit(uint64_t tid, bool replay=false) {
    version++;
    if (replay)
      projected_version = version;
    pending_for_mds.erase(tid);
  }
  void _note_rollback(uint64_t tid, bool replay=false) {
    version++;
    if (replay)
      projected_version = version;
    pending_for_mds.erase(tid);
  }
  void _note_server_update(bufferlist& bl, bool replay=false) {
    version++;
    if (replay)
      projected_version = version;
  }

  void reset_state() override {
    pending_for_mds.clear();
    ++version;
  }

  void handle_request(const cref_t<MMDSTableRequest> &m);
  void do_server_update(bufferlist& bl);

  virtual void encode_server_state(bufferlist& bl) const = 0;
  virtual void decode_server_state(bufferlist::const_iterator& bl) = 0;

  // Full persisted state = subclass payload + pending prepares.
  void encode_state(bufferlist& bl) const override {
    encode_server_state(bl);
    encode(pending_for_mds, bl);
  }
  void decode_state(bufferlist::const_iterator& bl) override {
    decode_server_state(bl);
    decode(pending_for_mds, bl);
  }

  // recovery
  void finish_recovery(std::set<mds_rank_t>& active);
  void _do_server_recovery();
  void handle_mds_recovery(mds_rank_t who);
  void handle_mds_failure_or_stop(mds_rank_t who);

protected:
  int table;
  bool recovered = false;
  std::set<mds_rank_t> active_clients;

private:
  // Bookkeeping for a prepare whose bystander notifications are still
  // being gathered before the reply can be sent.
  struct notify_info_t {
    notify_info_t() {}
    std::set<mds_rank_t> notify_ack_gather;
    mds_rank_t mds = MDS_RANK_NONE;        // was uninitialized
    ref_t<MMDSTableRequest> reply = nullptr;  // was NULL; use nullptr
    MDSContext *onfinish = nullptr;
  };

  friend class C_Prepare;
  friend class C_Commit;
  friend class C_Rollback;
  friend class C_ServerUpdate;

  void handle_prepare(const cref_t<MMDSTableRequest> &m);
  void _prepare_logged(const cref_t<MMDSTableRequest> &m, version_t tid);

  void handle_commit(const cref_t<MMDSTableRequest> &m);
  void _commit_logged(const cref_t<MMDSTableRequest> &m);

  void handle_rollback(const cref_t<MMDSTableRequest> &m);
  void _rollback_logged(const cref_t<MMDSTableRequest> &m);

  void _server_update_logged(bufferlist& bl);

  void handle_notify_ack(const cref_t<MMDSTableRequest> &m);

  std::map<version_t,mds_table_pending_t> pending_for_mds; // ** child should encode this! **
  std::set<version_t> committing_tids;
  std::map<version_t, notify_info_t> pending_notifies;
};
#endif
| 3,850 | 29.085938 | 101 | h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Michael Sevilla <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MANTLE_H
#define CEPH_MANTLE_H
#include <string_view>
#include <lua.hpp>
#include <vector>
#include <map>
#include <string>
#include "mdstypes.h"
/**
 * Mantle: wrapper around an embedded Lua interpreter used to run a
 * balancer policy script against per-rank metrics and produce migration
 * targets.
 */
class Mantle {
public:
  Mantle();
  // Close the interpreter if one was created.
  ~Mantle() { if (L) lua_close(L); }

  /**
   * Execute `script` for rank `whoami` against `metrics` (one
   * name->value map per rank), filling `my_targets`.
   * @return 0 on success, negative error code otherwise
   *         (assumed from convention -- confirm against Mantle.cc)
   */
  int balance(std::string_view script,
              mds_rank_t whoami,
              const std::vector <std::map<std::string, double>> &metrics,
              std::map<mds_rank_t,double> &my_targets);

protected:
  // Lua interpreter state, owned by this object.  Default-initialized to
  // nullptr so the destructor's `if (L)` guard is well-defined even if a
  // constructor path never assigns it (previously left uninitialized).
  // NOTE(review): the implicit copy operations would double-lua_close L;
  // consider deleting copy ctor/assignment.
  lua_State *L = nullptr;
};
#endif
| 926 | 21.609756 | 75 | h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_MDS_METRIC_AGGREGATOR_H
#define CEPH_MDS_METRIC_AGGREGATOR_H
#include <map>
#include <set>
#include <thread>
#include "msg/msg_types.h"
#include "msg/Dispatcher.h"
#include "common/ceph_mutex.h"
#include "include/common_fwd.h"
#include "messages/MMDSMetrics.h"
#include "mgr/MetricTypes.h"
#include "mgr/MDSPerfMetricTypes.h"
#include "mdstypes.h"
#include "MDSMap.h"
#include "MDSPinger.h"
class MDSRank;
class MgrClient;
/**
 * MetricAggregator: collects per-client metrics forwarded by MDS ranks
 * (MMDSMetrics), aggregates them per perf-metric query, and reports the
 * results to the manager via MgrClient.  Also pings active ranks via
 * MDSPinger from a dedicated thread.
 */
class MetricAggregator : public Dispatcher {
public:
  MetricAggregator(CephContext *cct, MDSRank *mds, MgrClient *mgrc);

  int init();
  void shutdown();

  // Track rank arrivals/departures so stale client metrics can be culled.
  void notify_mdsmap(const MDSMap &mdsmap);

  bool ms_can_fast_dispatch_any() const override {
    return true;
  }
  bool ms_can_fast_dispatch2(const cref_t<Message> &m) const override;
  void ms_fast_dispatch2(const ref_t<Message> &m) override;
  bool ms_dispatch2(const ref_t<Message> &m) override;

  // Connection lifecycle hooks: no special handling needed here.
  void ms_handle_connect(Connection *c) override {
  }
  bool ms_handle_reset(Connection *c) override {
    return false;
  }
  void ms_handle_remote_reset(Connection *c) override {
  }
  bool ms_handle_refused(Connection *c) override {
    return false;
  }

private:
  // drop this lock when calling ->send_message_mds() else mds might
  // deadlock
  ceph::mutex lock = ceph::make_mutex("MetricAggregator::lock");
  MDSRank *mds;
  MgrClient *mgrc;

  // maintain a map of rank to list of clients so that when a rank
  // goes away we cull metrics of clients connected to that rank.
  std::map<mds_rank_t, std::unordered_set<entity_inst_t>> clients_by_rank;

  // user query to metrics map
  std::map<MDSPerfMetricQuery, std::map<MDSPerfMetricKey, PerformanceCounters>> query_metrics_map;

  MDSPinger mds_pinger;
  // Thread running ping_all_active_ranks() (see init/shutdown).
  std::thread pinger;

  std::map<mds_rank_t, entity_addrvec_t> active_rank_addrs;

  // Set during shutdown; checked by the pinger loop.
  bool stopping = false;

  void handle_mds_metrics(const cref_t<MMDSMetrics> &m);

  void refresh_metrics_for_rank(const entity_inst_t &client, mds_rank_t rank,
                                const Metrics &metrics);
  void remove_metrics_for_rank(const entity_inst_t &client, mds_rank_t rank, bool remove);

  void cull_metrics_for_rank(mds_rank_t rank);

  void ping_all_active_ranks();

  // Mgr-driven perf query configuration and reporting.
  void set_perf_queries(const ConfigPayload &config_payload);
  MetricPayload get_perf_reports();
};
#endif // CEPH_MDS_METRIC_AGGREGATOR_H
| 2,436 | 26.077778 | 98 | h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_MDS_METRICS_HANDLER_H
#define CEPH_MDS_METRICS_HANDLER_H
#include <thread>
#include <utility>
#include <boost/variant.hpp>
#include "msg/Dispatcher.h"
#include "common/ceph_mutex.h"
#include "include/common_fwd.h"
#include "include/cephfs/metrics/Types.h"
#include "messages/MMDSPing.h"
#include "messages/MClientMetrics.h"
#include "MDSPerfMetricTypes.h"
class MDSRank;
class Session;
/**
 * MetricsHandler: receives client metric messages (MClientMetrics) on a
 * rank, tracks them per session, and periodically forwards the
 * accumulated updates to rank 0 (which aggregates cluster-wide).
 */
class MetricsHandler : public Dispatcher {
public:
  MetricsHandler(CephContext *cct, MDSRank *mds);

  bool ms_can_fast_dispatch_any() const override {
    return true;
  }
  bool ms_can_fast_dispatch2(const cref_t<Message> &m) const override;
  void ms_fast_dispatch2(const ref_t<Message> &m) override;
  bool ms_dispatch2(const ref_t<Message> &m) override;

  // Connection lifecycle hooks: no special handling needed here.
  void ms_handle_connect(Connection *c) override {
  }
  bool ms_handle_reset(Connection *c) override {
    return false;
  }
  void ms_handle_remote_reset(Connection *c) override {
  }
  bool ms_handle_refused(Connection *c) override {
    return false;
  }

  // Session lifecycle notifications from the rank.
  void add_session(Session *session);
  void remove_session(Session *session);

  void init();
  void shutdown();

  // Track rank 0's address so updates can be sent without mds_lock.
  void notify_mdsmap(const MDSMap &mdsmap);

private:
  // Visitor dispatching each variant of a client metric payload to the
  // matching handle_payload() overload below.
  struct HandlePayloadVisitor : public boost::static_visitor<void> {
    MetricsHandler *metrics_handler;
    Session *session;

    HandlePayloadVisitor(MetricsHandler *metrics_handler, Session *session)
      : metrics_handler(metrics_handler), session(session) {
    }

    template <typename ClientMetricPayload>
    inline void operator()(const ClientMetricPayload &payload) const {
      metrics_handler->handle_payload(session, payload);
    }
  };

  MDSRank *mds;
  // drop this lock when calling ->send_message_mds() else mds might
  // deadlock
  ceph::mutex lock = ceph::make_mutex("MetricsHandler::lock");

  // ISN sent by rank0 pinger is 1
  version_t next_seq = 0;

  // sequence number incremented on each update sent to rank 0.
  // this is nowhere related to next_seq and is completely used
  // locally to figure out if a session got added and removed
  // within an update to rank 0.
  version_t last_updated_seq = 0;

  // Thread running update_rank0() (see init/shutdown).
  std::thread updater;
  // Per-client (sequence, metrics) snapshots keyed by client instance.
  std::map<entity_inst_t, std::pair<version_t, Metrics>> client_metrics_map;

  // address of rank 0 mds, so that the message can be sent without
  // acquiring mds_lock. misdirected messages to rank 0 are taken
  // care of by rank 0.
  boost::optional<entity_addrvec_t> addr_rank0;

  // Set during shutdown; checked by the updater loop.
  bool stopping = false;

  // One overload per client metric payload type (see
  // HandlePayloadVisitor); UnknownPayload covers unrecognized variants.
  void handle_payload(Session *session, const CapInfoPayload &payload);
  void handle_payload(Session *session, const ReadLatencyPayload &payload);
  void handle_payload(Session *session, const WriteLatencyPayload &payload);
  void handle_payload(Session *session, const MetadataLatencyPayload &payload);
  void handle_payload(Session *session, const DentryLeasePayload &payload);
  void handle_payload(Session *session, const OpenedFilesPayload &payload);
  void handle_payload(Session *session, const PinnedIcapsPayload &payload);
  void handle_payload(Session *session, const OpenedInodesPayload &payload);
  void handle_payload(Session *session, const ReadIoSizesPayload &payload);
  void handle_payload(Session *session, const WriteIoSizesPayload &payload);
  void handle_payload(Session *session, const UnknownPayload &payload);

  void set_next_seq(version_t seq);
  void reset_seq();

  void handle_client_metrics(const cref_t<MClientMetrics> &m);
  void handle_mds_ping(const cref_t<MMDSPing> &m);

  void update_rank0();
};
#endif // CEPH_MDS_METRICS_HANDLER_H
| 3,640 | 30.66087 | 79 | h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
* Handles the import and export of mds authorities and actual cache data.
* See src/doc/exports.txt for a description.
*/
#ifndef CEPH_MDS_MIGRATOR_H
#define CEPH_MDS_MIGRATOR_H
#include "include/types.h"
#include "MDSContext.h"
#include <map>
#include <list>
#include <set>
#include <string_view>
#include "messages/MExportCaps.h"
#include "messages/MExportCapsAck.h"
#include "messages/MExportDir.h"
#include "messages/MExportDirAck.h"
#include "messages/MExportDirCancel.h"
#include "messages/MExportDirDiscover.h"
#include "messages/MExportDirDiscoverAck.h"
#include "messages/MExportDirFinish.h"
#include "messages/MExportDirNotify.h"
#include "messages/MExportDirNotifyAck.h"
#include "messages/MExportDirPrep.h"
#include "messages/MExportDirPrepAck.h"
#include "messages/MGatherCaps.h"
class MDSRank;
class CDir;
class CInode;
class CDentry;
class Session;
class EImportStart;
/**
 * Migrator - drives the export/import (migration) of subtree authority
 * between MDS ranks, plus bystander notification and the associated
 * client-capability export/import. One instance per MDSRank.
 *
 * Fix vs. previous revision: export_has_warned() is now const, matching
 * its sibling accessor export_has_notified() (it only reads export_state).
 */
class Migrator {
public:
  // export stages. used to clean up intelligently if there's a failure.
  const static int EXPORT_CANCELLED = 0; // cancelled
  const static int EXPORT_CANCELLING = 1; // waiting for cancel notifyacks
  const static int EXPORT_LOCKING = 2; // acquiring locks
  const static int EXPORT_DISCOVERING = 3; // dest is discovering export dir
  const static int EXPORT_FREEZING = 4; // we're freezing the dir tree
  const static int EXPORT_PREPPING = 5; // sending dest spanning tree to export bounds
  const static int EXPORT_WARNING = 6; // warning bystanders of dir_auth_pending
  const static int EXPORT_EXPORTING = 7; // sent actual export, waiting for ack
  const static int EXPORT_LOGGINGFINISH = 8; // logging EExportFinish
  const static int EXPORT_NOTIFYING = 9; // waiting for notifyacks

  // -- imports --
  const static int IMPORT_DISCOVERING = 1; // waiting for prep
  const static int IMPORT_DISCOVERED = 2; // waiting for prep
  const static int IMPORT_PREPPING = 3; // opening dirs on bounds
  const static int IMPORT_PREPPED = 4; // opened bounds, waiting for import
  const static int IMPORT_LOGGINGSTART = 5; // got import, logging EImportStart
  const static int IMPORT_ACKING = 6; // logged EImportStart, sent ack, waiting for finish
  const static int IMPORT_FINISHING = 7; // sent cap imports, waiting for finish
  const static int IMPORT_ABORTING = 8; // notifying bystanders of an abort before unfreezing

  // -- cons --
  Migrator(MDSRank *m, MDCache *c);

  // Human-readable name for an export state (for logging/asserts).
  static std::string_view get_export_statename(int s) {
    switch (s) {
    case EXPORT_CANCELLING: return "cancelling";
    case EXPORT_LOCKING: return "locking";
    case EXPORT_DISCOVERING: return "discovering";
    case EXPORT_FREEZING: return "freezing";
    case EXPORT_PREPPING: return "prepping";
    case EXPORT_WARNING: return "warning";
    case EXPORT_EXPORTING: return "exporting";
    case EXPORT_LOGGINGFINISH: return "loggingfinish";
    case EXPORT_NOTIFYING: return "notifying";
    default: ceph_abort(); return std::string_view();
    }
  }

  // Human-readable name for an import state (for logging/asserts).
  static std::string_view get_import_statename(int s) {
    switch (s) {
    case IMPORT_DISCOVERING: return "discovering";
    case IMPORT_DISCOVERED: return "discovered";
    case IMPORT_PREPPING: return "prepping";
    case IMPORT_PREPPED: return "prepped";
    case IMPORT_LOGGINGSTART: return "loggingstart";
    case IMPORT_ACKING: return "acking";
    case IMPORT_FINISHING: return "finishing";
    case IMPORT_ABORTING: return "aborting";
    default: ceph_abort(); return std::string_view();
    }
  }

  void handle_conf_change(const std::set<std::string>& changed, const MDSMap& mds_map);

  void dispatch(const cref_t<Message> &);

  void show_importing();
  void show_exporting();

  int get_num_exporting() const { return export_state.size(); }
  int get_export_queue_size() const { return export_queue.size(); }

  // -- status --
  // Returns the export state for @dir, or 0 if not exporting it.
  int is_exporting(CDir *dir) const {
    auto it = export_state.find(dir);
    if (it != export_state.end()) return it->second.state;
    return 0;
  }
  bool is_exporting() const { return !export_state.empty(); }
  // Returns the import state for @df, or 0 if not importing it.
  int is_importing(dirfrag_t df) const {
    auto it = import_state.find(df);
    if (it != import_state.end()) return it->second.state;
    return 0;
  }
  bool is_importing() const { return !import_state.empty(); }

  // An import is "ambiguous" once EImportStart has been logged but the
  // import has not yet been aborted/finished.
  bool is_ambiguous_import(dirfrag_t df) const {
    auto it = import_state.find(df);
    if (it == import_state.end())
      return false;
    if (it->second.state >= IMPORT_LOGGINGSTART &&
	it->second.state < IMPORT_ABORTING)
      return true;
    return false;
  }

  int get_import_state(dirfrag_t df) const {
    auto it = import_state.find(df);
    ceph_assert(it != import_state.end());
    return it->second.state;
  }
  int get_import_peer(dirfrag_t df) const {
    auto it = import_state.find(df);
    ceph_assert(it != import_state.end());
    return it->second.peer;
  }

  int get_export_state(CDir *dir) const {
    auto it = export_state.find(dir);
    ceph_assert(it != export_state.end());
    return it->second.state;
  }

  // this returns true if we are export @dir,
  // and are not waiting for @who to be
  // be warned of ambiguous auth.
  // only returns meaningful results during EXPORT_WARNING state.
  bool export_has_warned(CDir *dir, mds_rank_t who) const {
    auto it = export_state.find(dir);
    ceph_assert(it != export_state.end());
    ceph_assert(it->second.state == EXPORT_WARNING);
    return (it->second.warning_ack_waiting.count(who) == 0);
  }

  bool export_has_notified(CDir *dir, mds_rank_t who) const {
    auto it = export_state.find(dir);
    ceph_assert(it != export_state.end());
    ceph_assert(it->second.state == EXPORT_NOTIFYING);
    return (it->second.notify_ack_waiting.count(who) == 0);
  }

  void export_freeze_inc_num_waiters(CDir *dir) {
    auto it = export_state.find(dir);
    ceph_assert(it != export_state.end());
    it->second.num_remote_waiters++;
  }
  void find_stale_export_freeze();

  // -- misc --
  void handle_mds_failure_or_stop(mds_rank_t who);

  void audit();

  // -- import/export --
  // exporter
  void dispatch_export_dir(MDRequestRef& mdr, int count);
  void export_dir(CDir *dir, mds_rank_t dest);
  void export_empty_import(CDir *dir);

  void export_dir_nicely(CDir *dir, mds_rank_t dest);
  void maybe_do_queued_export();
  void clear_export_queue() {
    export_queue.clear();
    export_queue_gen++;
  }

  void maybe_split_export(CDir* dir, uint64_t max_size, bool null_okay,
			  std::vector<std::pair<CDir*, size_t> >& results);

  bool export_try_grab_locks(CDir *dir, MutationRef& mut);
  void get_export_client_set(CDir *dir, std::set<client_t> &client_set);
  void get_export_client_set(CInode *in, std::set<client_t> &client_set);

  void encode_export_inode(CInode *in, bufferlist& bl,
			   std::map<client_t,entity_inst_t>& exported_client_map,
			   std::map<client_t,client_metadata_t>& exported_client_metadata_map);
  void encode_export_inode_caps(CInode *in, bool auth_cap, bufferlist& bl,
				std::map<client_t,entity_inst_t>& exported_client_map,
				std::map<client_t,client_metadata_t>& exported_client_metadata_map);
  void finish_export_inode(CInode *in, mds_rank_t target,
			   std::map<client_t,Capability::Import>& peer_imported,
			   MDSContext::vec& finished);
  void finish_export_inode_caps(CInode *in, mds_rank_t target,
			        std::map<client_t,Capability::Import>& peer_imported);

  void encode_export_dir(bufferlist& exportbl,
			 CDir *dir,
			 std::map<client_t,entity_inst_t>& exported_client_map,
			 std::map<client_t,client_metadata_t>& exported_client_metadata_map,
			 uint64_t &num_exported);
  void finish_export_dir(CDir *dir, mds_rank_t target,
			 std::map<inodeno_t,std::map<client_t,Capability::Import> >& peer_imported,
			 MDSContext::vec& finished, int *num_dentries);

  void clear_export_proxy_pins(CDir *dir);

  void export_caps(CInode *in);

  void decode_import_inode(CDentry *dn, bufferlist::const_iterator& blp,
			   mds_rank_t oldauth, LogSegment *ls,
			   std::map<CInode*, std::map<client_t,Capability::Export> >& cap_imports,
			   std::list<ScatterLock*>& updated_scatterlocks);
  void decode_import_inode_caps(CInode *in, bool auth_cap, bufferlist::const_iterator &blp,
				std::map<CInode*, std::map<client_t,Capability::Export> >& cap_imports);
  void finish_import_inode_caps(CInode *in, mds_rank_t from, bool auth_cap,
				const std::map<client_t,std::pair<Session*,uint64_t> >& smap,
				const std::map<client_t,Capability::Export> &export_map,
				std::map<client_t,Capability::Import> &import_map);
  void decode_import_dir(bufferlist::const_iterator& blp,
			 mds_rank_t oldauth,
			 CDir *import_root,
			 EImportStart *le,
			 LogSegment *ls,
			 std::map<CInode*, std::map<client_t,Capability::Export> >& cap_imports,
			 std::list<ScatterLock*>& updated_scatterlocks, int &num_imported);

  void import_reverse(CDir *dir);

  void import_finish(CDir *dir, bool notify, bool last=true);

protected:
  // Shared bookkeeping for a parent export that was split into children
  // by maybe_split_export(); tracks how many child exports remain.
  struct export_base_t {
    export_base_t(dirfrag_t df, mds_rank_t d, unsigned c, uint64_t g) :
      dirfrag(df), dest(d), pending_children(c), export_queue_gen(g) {}

    dirfrag_t dirfrag;
    mds_rank_t dest;
    unsigned pending_children;
    uint64_t export_queue_gen;
    bool restart = false;
  };

  // export fun
  struct export_state_t {
    export_state_t() {}

    int state = 0;
    mds_rank_t peer = MDS_RANK_NONE;
    uint64_t tid = 0;
    std::set<mds_rank_t> warning_ack_waiting;
    std::set<mds_rank_t> notify_ack_waiting;
    std::map<inodeno_t,std::map<client_t,Capability::Import> > peer_imported;
    MutationRef mut;
    size_t approx_size = 0;
    // for freeze tree deadlock detection
    utime_t last_cum_auth_pins_change;
    int last_cum_auth_pins = 0;
    int num_remote_waiters = 0; // number of remote authpin waiters
    std::shared_ptr<export_base_t> parent;
  };

  // import fun
  struct import_state_t {
    import_state_t() : mut() {}

    int state = 0;
    mds_rank_t peer = 0;
    uint64_t tid = 0;
    std::set<mds_rank_t> bystanders;
    std::list<dirfrag_t> bound_ls;
    std::list<ScatterLock*> updated_scatterlocks;
    std::map<client_t,std::pair<Session*,uint64_t> > session_map;
    std::map<CInode*, std::map<client_t,Capability::Export> > peer_exports;
    MutationRef mut;
  };

  typedef std::map<CDir*, export_state_t>::iterator export_state_iterator;

  friend class C_MDC_ExportFreeze;
  friend class C_MDS_ExportFinishLogged;
  friend class C_M_ExportGo;
  friend class C_M_ExportSessionsFlushed;
  friend class C_MDS_ExportDiscover;
  friend class C_MDS_ExportPrep;
  friend class MigratorContext;
  friend class MigratorLogContext;
  friend class C_MDS_ImportDirLoggedStart;
  friend class C_MDS_ImportDirLoggedFinish;
  friend class C_M_LoggedImportCaps;

  void handle_export_discover_ack(const cref_t<MExportDirDiscoverAck> &m);
  void export_frozen(CDir *dir, uint64_t tid);
  void handle_export_prep_ack(const cref_t<MExportDirPrepAck> &m);
  void export_sessions_flushed(CDir *dir, uint64_t tid);
  void export_go(CDir *dir);
  void export_go_synced(CDir *dir, uint64_t tid);
  void export_try_cancel(CDir *dir, bool notify_peer=true);
  void export_cancel_finish(export_state_iterator& it);
  void export_reverse(CDir *dir, export_state_t& stat);
  void export_notify_abort(CDir *dir, export_state_t& stat, std::set<CDir*>& bounds);
  void handle_export_ack(const cref_t<MExportDirAck> &m);
  void export_logged_finish(CDir *dir);
  void handle_export_notify_ack(const cref_t<MExportDirNotifyAck> &m);
  void export_finish(CDir *dir);
  void child_export_finish(std::shared_ptr<export_base_t>& parent, bool success);
  void encode_export_prep_trace(bufferlist& bl, CDir *bound, CDir *dir, export_state_t &es,
                                std::set<inodeno_t> &inodes_added, std::set<dirfrag_t> &dirfrags_added);
  void decode_export_prep_trace(bufferlist::const_iterator& blp, mds_rank_t oldauth, MDSContext::vec &finished);

  void handle_gather_caps(const cref_t<MGatherCaps> &m);

  // importer
  void handle_export_discover(const cref_t<MExportDirDiscover> &m, bool started=false);
  void handle_export_cancel(const cref_t<MExportDirCancel> &m);
  void handle_export_prep(const cref_t<MExportDirPrep> &m, bool did_assim=false);
  void handle_export_dir(const cref_t<MExportDir> &m);

  void import_reverse_discovering(dirfrag_t df);
  void import_reverse_discovered(dirfrag_t df, CInode *diri);
  void import_reverse_prepping(CDir *dir, import_state_t& stat);
  void import_remove_pins(CDir *dir, std::set<CDir*>& bounds);
  void import_reverse_unfreeze(CDir *dir);
  void import_reverse_final(CDir *dir);
  void import_notify_abort(CDir *dir, std::set<CDir*>& bounds);
  void import_notify_finish(CDir *dir, std::set<CDir*>& bounds);
  void import_logged_start(dirfrag_t df, CDir *dir, mds_rank_t from,
			   std::map<client_t,std::pair<Session*,uint64_t> >& imported_session_map);
  void handle_export_finish(const cref_t<MExportDirFinish> &m);

  void handle_export_caps(const cref_t<MExportCaps> &m);
  void handle_export_caps_ack(const cref_t<MExportCapsAck> &m);
  void logged_import_caps(CInode *in,
			  mds_rank_t from,
			  std::map<client_t,std::pair<Session*,uint64_t> >& imported_session_map,
			  std::map<CInode*, std::map<client_t,Capability::Export> >& cap_imports);

  // bystander
  void handle_export_notify(const cref_t<MExportDirNotify> &m);

  std::map<CDir*, export_state_t> export_state;
  uint64_t total_exporting_size = 0;
  unsigned num_locking_exports = 0; // exports in locking state (approx_size == 0)

  std::list<std::pair<dirfrag_t,mds_rank_t> > export_queue;
  uint64_t export_queue_gen = 1;

  std::map<dirfrag_t, import_state_t> import_state;

private:
  MDSRank *mds;
  MDCache *mdcache;
  uint64_t max_export_size = 0;
  bool inject_session_race = false;
};
#endif
| 14,279 | 36.6781 | 112 | h |
null | ceph-main/src/mds/Mutation.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_MUTATION_H
#define CEPH_MDS_MUTATION_H
#include "include/interval_set.h"
#include "include/elist.h"
#include "include/filepath.h"
#include "MDSCacheObject.h"
#include "MDSContext.h"
#include "SimpleLock.h"
#include "Capability.h"
#include "BatchOp.h"
#include "common/TrackedOp.h"
#include "messages/MClientRequest.h"
#include "messages/MMDSPeerRequest.h"
#include "messages/MClientReply.h"
class LogSegment;
class CInode;
class CDir;
class CDentry;
class Session;
class ScatterLock;
struct sr_t;
struct MDLockCache;
/**
 * MutationImpl - base state for any cache-mutating operation. Tracks the
 * pins, auth pins and locks this mutation holds on MDSCacheObjects so they
 * can all be released when it completes, aborts or is forwarded.
 */
struct MutationImpl : public TrackedOp {
public:
  // -- my pins and auth_pins --
  // Per-object record of what this mutation holds on the object: a cache
  // pin, a local auth pin, and/or a remote auth pin held on our behalf.
  struct ObjectState {
    bool pinned = false;
    bool auth_pinned = false;
    mds_rank_t remote_auth_pinned = MDS_RANK_NONE;
  };

  // held locks
  // One entry per lock this mutation holds (or intends to hold); flags and
  // wrlock_target are mutable so they can be updated while the LockOp sits
  // inside the ordered std::set below.
  struct LockOp {
    enum {
      RDLOCK = 1,
      WRLOCK = 2,
      XLOCK = 4,
      REMOTE_WRLOCK = 8,
      STATE_PIN = 16, // no RW after locked, just pin lock state
    };

    LockOp(SimpleLock *l, unsigned f=0, mds_rank_t t=MDS_RANK_NONE) :
      lock(l), flags(f), wrlock_target(t) {}

    bool is_rdlock() const { return !!(flags & RDLOCK); }
    bool is_xlock() const { return !!(flags & XLOCK); }
    bool is_wrlock() const { return !!(flags & WRLOCK); }
    void clear_wrlock() const { flags &= ~WRLOCK; }
    bool is_remote_wrlock() const { return !!(flags & REMOTE_WRLOCK); }
    void clear_remote_wrlock() const {
      flags &= ~REMOTE_WRLOCK;
      wrlock_target = MDS_RANK_NONE;
    }
    bool is_state_pin() const { return !!(flags & STATE_PIN); }
    // Ordering is by lock pointer; this defines the global lock ordering
    // used by the lock_set below.
    bool operator<(const LockOp& r) const {
      return lock < r.lock;
    }

    SimpleLock* lock;
    mutable unsigned flags;
    mutable mds_rank_t wrlock_target;
  };

  // Builder for a batch of lock operations, later sorted/merged and
  // acquired in order.
  struct LockOpVec : public std::vector<LockOp> {
    LockOpVec() {
      reserve(32);
    }

    void add_rdlock(SimpleLock *lock) {
      emplace_back(lock, LockOp::RDLOCK);
    }
    void erase_rdlock(SimpleLock *lock);
    void add_xlock(SimpleLock *lock, int idx=-1) {
      if (idx >= 0)
	emplace(cbegin() + idx, lock, LockOp::XLOCK);
      else
	emplace_back(lock, LockOp::XLOCK);
    }
    void add_wrlock(SimpleLock *lock, int idx=-1) {
      if (idx >= 0)
	emplace(cbegin() + idx, lock, LockOp::WRLOCK);
      else
	emplace_back(lock, LockOp::WRLOCK);
    }
    void add_remote_wrlock(SimpleLock *lock, mds_rank_t rank) {
      ceph_assert(rank != MDS_RANK_NONE);
      emplace_back(lock, LockOp::REMOTE_WRLOCK, rank);
    }
    void lock_scatter_gather(SimpleLock *lock) {
      emplace_back(lock, LockOp::WRLOCK | LockOp::STATE_PIN);
    }
    void sort_and_merge();
  };

  using lock_set = std::set<LockOp>;
  using lock_iterator = lock_set::iterator;

  // keep our default values synced with MDRequestParam's
  MutationImpl() : TrackedOp(nullptr, ceph_clock_now()) {}
  MutationImpl(OpTracker *tracker, utime_t initiated,
	       const metareqid_t &ri, __u32 att=0, mds_rank_t peer_to=MDS_RANK_NONE)
    : TrackedOp(tracker, initiated),
      reqid(ri), attempt(att),
      peer_to_mds(peer_to) {}
  ~MutationImpl() override {
    // all pins/locks must have been released before destruction
    ceph_assert(!locking);
    ceph_assert(!lock_cache);
    ceph_assert(num_pins == 0);
    ceph_assert(num_auth_pins == 0);
  }

  // Returns this mutation's pin/auth-pin record for @obj, or nullptr.
  const ObjectState* find_object_state(MDSCacheObject *obj) const {
    auto it = object_states.find(obj);
    return it != object_states.end() ? &it->second : nullptr;
  }

  bool is_any_remote_auth_pin() const { return num_remote_auth_pins > 0; }

  void disable_lock_cache() {
    lock_cache_disabled = true;
  }

  // Record a (possibly new) lock op; also remembers it as the most
  // recently locked lock (see is_last_locked()).
  lock_iterator emplace_lock(SimpleLock *l, unsigned f=0, mds_rank_t t=MDS_RANK_NONE) {
    last_locked = l;
    return locks.emplace(l, f, t).first;
  }

  bool is_rdlocked(SimpleLock *lock) const;
  bool is_wrlocked(SimpleLock *lock) const;
  bool is_xlocked(SimpleLock *lock) const {
    auto it = locks.find(lock);
    return it != locks.end() && it->is_xlock();
  }
  bool is_remote_wrlocked(SimpleLock *lock) const {
    auto it = locks.find(lock);
    return it != locks.end() && it->is_remote_wrlock();
  }
  bool is_last_locked(SimpleLock *lock) const {
    return lock == last_locked;
  }

  // leader vs peer: a mutation is a peer op iff it was initiated on
  // behalf of another rank (peer_to_mds >= 0).
  bool is_leader() const { return peer_to_mds == MDS_RANK_NONE; }
  bool is_peer() const { return peer_to_mds != MDS_RANK_NONE; }

  client_t get_client() const {
    if (reqid.name.is_client())
      return client_t(reqid.name.num());
    return -1;
  }

  void set_mds_stamp(utime_t t) {
    mds_stamp = t;
  }
  utime_t get_mds_stamp() const {
    return mds_stamp;
  }
  void set_op_stamp(utime_t t) {
    op_stamp = t;
  }
  // Client-provided op stamp, falling back to the local mds stamp.
  utime_t get_op_stamp() const {
    if (op_stamp != utime_t())
      return op_stamp;
    return get_mds_stamp();
  }

  // pin items in cache
  void pin(MDSCacheObject *object);
  void unpin(MDSCacheObject *object);
  void set_stickydirs(CInode *in);
  void put_stickydirs();
  void drop_pins();

  void start_locking(SimpleLock *lock, int target=-1);
  void finish_locking(SimpleLock *lock);

  // auth pins
  bool is_auth_pinned(MDSCacheObject *object) const;
  void auth_pin(MDSCacheObject *object);
  void auth_unpin(MDSCacheObject *object);
  void drop_local_auth_pins();
  void set_remote_auth_pinned(MDSCacheObject* object, mds_rank_t from);
  void _clear_remote_auth_pinned(ObjectState& stat);

  void add_projected_node(MDSCacheObject* obj) {
    projected_nodes.insert(obj);
  }
  void remove_projected_node(MDSCacheObject* obj) {
    projected_nodes.erase(obj);
  }
  bool is_projected(MDSCacheObject *obj) const {
    return projected_nodes.count(obj);
  }
  void add_updated_lock(ScatterLock *lock);
  void add_cow_inode(CInode *in);
  void add_cow_dentry(CDentry *dn);
  void apply();
  void cleanup();

  virtual void print(std::ostream &out) const {
    out << "mutation(" << this << ")";
  }

  virtual void dump(ceph::Formatter *f) const {}
  void _dump_op_descriptor_unlocked(std::ostream& stream) const override;

  metareqid_t reqid;
  __u32 attempt = 0;      // which attempt for this request
  LogSegment *ls = nullptr;  // the log segment i'm committing to

  // flag mutation as peer
  mds_rank_t peer_to_mds = MDS_RANK_NONE;  // this is a peer request if >= 0.

  ceph::unordered_map<MDSCacheObject*, ObjectState> object_states;
  int num_pins = 0;
  int num_auth_pins = 0;
  int num_remote_auth_pins = 0;
  // cache pins (so things don't expire)
  CInode* stickydiri = nullptr;

  lock_set locks;  // full ordering
  MDLockCache* lock_cache = nullptr;
  bool lock_cache_disabled = false;
  SimpleLock *last_locked = nullptr;
  // Lock we are currently trying to acquire. If we give up for some reason,
  // be sure to eval() this.
  SimpleLock *locking = nullptr;
  mds_rank_t locking_target_mds = -1;

  // if this flag is set, do not attempt to acquire further locks.
  //  (useful for wrlock, which may be a moving auth target)
  enum {
    SNAP_LOCKED		= 1,
    SNAP2_LOCKED	= 2,
    PATH_LOCKED		= 4,
    ALL_LOCKED		= 8,
  };
  int locking_state = 0;

  bool committing = false;
  bool aborted = false;
  bool killed = false;

  // for applying projected inode changes
  std::set<MDSCacheObject*> projected_nodes;
  std::list<ScatterLock*> updated_locks;

  std::list<CInode*> dirty_cow_inodes;
  std::list<std::pair<CDentry*,version_t> > dirty_cow_dentries;

private:
  utime_t mds_stamp; ///< mds-local timestamp (real time)
  utime_t op_stamp;  ///< op timestamp (client provided)
};
/**
* MDRequestImpl: state we track for requests we are currently processing.
* mostly information about locks held, so that we can drop them all
* the request is finished or forwarded. see request_*().
*/
struct MDRequestImpl : public MutationImpl {
  // TrackedOp stuff
  typedef boost::intrusive_ptr<MDRequestImpl> Ref;

  // break rarely-used fields into a separately allocated structure
  // to save memory for most ops
  struct More {
    More() {}

    int peer_error = 0;
    std::set<mds_rank_t> peers;           // mds nodes that have peer requests to me (implies client_request)
    std::set<mds_rank_t> waiting_on_peer; // peers i'm waiting for peerreq replies from.

    // for rename/link/unlink
    std::set<mds_rank_t> witnessed;       // nodes who have journaled a RenamePrepare
    std::map<MDSCacheObject*,version_t> pvmap;

    bool has_journaled_peers = false;
    bool peer_update_journaled = false;
    bool peer_rolling_back = false;

    // for rename
    std::set<mds_rank_t> extra_witnesses; // replica list from srcdn auth (rename)
    mds_rank_t srcdn_auth_mds = MDS_RANK_NONE;
    ceph::buffer::list inode_import;
    version_t inode_import_v = 0;
    CInode* rename_inode = nullptr;
    bool is_freeze_authpin = false;
    bool is_ambiguous_auth = false;
    bool is_remote_frozen_authpin = false;
    bool is_inode_exporter = false;
    bool rdonly_checks = false;

    std::map<client_t, std::pair<Session*, uint64_t> > imported_session_map;
    std::map<CInode*, std::map<client_t,Capability::Export> > cap_imports;

    // for lock/flock
    bool flock_was_waiting = false;

    // for snaps
    version_t stid = 0;
    ceph::buffer::list snapidbl;

    sr_t *srci_srnode = nullptr;
    sr_t *desti_srnode = nullptr;

    // called when peer commits or aborts
    Context *peer_commit = nullptr;
    ceph::buffer::list rollback_bl;

    MDSContext::vec waiting_for_finish;

    // export & fragment
    CDir* export_dir = nullptr;
    dirfrag_t fragment_base;

    // for internal ops doing lookup
    filepath filepath1;
    filepath filepath2;
  } *_more = nullptr;

  // ---------------------------------------------------
  // Construction parameters; carried separately so OpTracker timestamps
  // and the triggering messages can be passed in one bundle.
  struct Params {
    // keep these default values synced to MutationImpl's
    Params() {}
    const utime_t& get_recv_stamp() const {
      return initiated;
    }
    const utime_t& get_throttle_stamp() const {
      return throttled;
    }
    const utime_t& get_recv_complete_stamp() const {
      return all_read;
    }
    const utime_t& get_dispatch_stamp() const {
      return dispatched;
    }

    metareqid_t reqid;
    __u32 attempt = 0;
    ceph::cref_t<MClientRequest> client_req;
    ceph::cref_t<Message> triggering_peer_req;
    mds_rank_t peer_to = MDS_RANK_NONE;
    utime_t initiated;
    utime_t throttled, all_read, dispatched;
    int internal_op = -1;
  };
  MDRequestImpl(const Params* params, OpTracker *tracker) :
    MutationImpl(tracker, params->initiated,
		 params->reqid, params->attempt, params->peer_to),
    item_session_request(this), client_request(params->client_req),
    internal_op(params->internal_op) {}
  ~MDRequestImpl() override;

  More* more();
  bool has_more() const;
  bool has_witnesses();
  bool peer_did_prepare();
  bool peer_rolling_back();
  bool freeze_auth_pin(CInode *inode);
  void unfreeze_auth_pin(bool clear_inode=false);
  void set_remote_frozen_auth_pin(CInode *inode);
  bool can_auth_pin(MDSCacheObject *object);
  void drop_local_auth_pins();
  void set_ambiguous_auth(CInode *inode);
  void clear_ambiguous_auth();
  const filepath& get_filepath();
  const filepath& get_filepath2();
  void set_filepath(const filepath& fp);
  void set_filepath2(const filepath& fp);
  bool is_queued_for_replay() const;
  int compare_paths();

  bool can_batch();
  bool is_batch_head() {
    return batch_op_map != nullptr;
  }
  std::unique_ptr<BatchOp> release_batch_op();

  void print(std::ostream &out) const override;
  void dump(ceph::Formatter *f) const override;

  ceph::cref_t<MClientRequest> release_client_request();
  void reset_peer_request(const ceph::cref_t<MMDSPeerRequest>& req=nullptr);

  Session *session = nullptr;
  elist<MDRequestImpl*>::item item_session_request;  // if not on list, op is aborted.

  // -- i am a client (leader) request
  ceph::cref_t<MClientRequest> client_request; // client request (if any)

  // tree and depth info of path1 and path2
  inodeno_t dir_root[2] = {0, 0};
  int dir_depth[2] = {-1, -1};
  file_layout_t dir_layout;
  // store up to two sets of dn vectors, inode pointers, for request path1 and path2.
  std::vector<CDentry*> dn[2];
  CInode *in[2] = {};
  CDentry *straydn = nullptr;
  snapid_t snapid = CEPH_NOSNAP;
  snapid_t snapid_diff_other = CEPH_NOSNAP;

  CInode *tracei = nullptr;
  CDentry *tracedn = nullptr;

  inodeno_t alloc_ino = 0, used_prealloc_ino = 0;
  interval_set<inodeno_t> prealloc_inos;

  int snap_caps = 0;
  int getattr_caps = 0;		///< caps requested by getattr
  bool no_early_reply = false;
  bool did_early_reply = false;
  bool o_trunc = false;		///< request is an O_TRUNC mutation
  bool has_completed = false;	///< request has already completed

  ceph::buffer::list reply_extra_bl;

  // inos we did a embedded cap release on, and may need to eval if we haven't since reissued
  std::map<vinodeno_t, ceph_seq_t> cap_releases;

  // -- i am a peer request
  ceph::cref_t<MMDSPeerRequest> peer_request; // peer request (if one is pending; implies peer == true)

  // -- i am an internal op
  int internal_op;
  Context *internal_op_finish = nullptr;
  void *internal_op_private = nullptr;

  // indicates how may retries of request have been made
  int retry = 0;

  std::map<int, std::unique_ptr<BatchOp> > *batch_op_map = nullptr;

  // indicator for vxattr osdmap update
  bool waited_for_osdmap = false;

protected:
  void _dump(ceph::Formatter *f) const override;
  void _dump_op_descriptor_unlocked(std::ostream& stream) const override;

private:
  // guards message fields (client_request/peer_request) against
  // concurrent release/reset — see release_client_request().
  mutable ceph::spinlock msg_lock;
};
struct MDPeerUpdate {
MDPeerUpdate(int oo, ceph::buffer::list &rbl) :
origop(oo) {
rollback = std::move(rbl);
}
~MDPeerUpdate() {
if (waiter)
waiter->complete(0);
}
int origop;
ceph::buffer::list rollback;
Context *waiter = nullptr;
std::set<CInode*> olddirs;
std::set<CInode*> unlinked;
};
// Linkage node tying one held lock back to its owning MDLockCache via an
// embedded elist item (one per cached lock; see MDLockCache::items_lock).
struct MDLockCacheItem {
  MDLockCache *parent = nullptr;
  elist<MDLockCacheItem*>::item item_lock;
};
/**
 * MDLockCache - a set of locks and auth-pinned dirfrags cached on behalf of
 * a client capability, so that repeated operations of the same kind
 * (opcode) under the same directory can reuse them. Reference counted via
 * 'ref'; constructor links itself onto the capability's lock_caches list.
 */
struct MDLockCache : public MutationImpl {
  using LockItem = MDLockCacheItem;

  // Linkage node tying an auth-pinned dirfrag back to this lock cache.
  struct DirItem {
    MDLockCache *parent = nullptr;
    elist<DirItem*>::item item_dir;
  };

  MDLockCache(Capability *cap, int op) :
    MutationImpl(), diri(cap->get_inode()), client_cap(cap), opcode(op) {
    client_cap->lock_caches.push_back(&item_cap_lock_cache);
  }

  CInode *get_dir_inode() { return diri; }
  void set_dir_layout(file_layout_t& layout) {
    dir_layout = layout;
  }
  const file_layout_t& get_dir_layout() const {
    return dir_layout;
  }

  void attach_locks();
  void attach_dirfrags(std::vector<CDir*>&& dfv);
  void detach_locks();
  void detach_dirfrags();

  CInode *diri;           // directory inode the cache covers
  Capability *client_cap; // owning client capability
  int opcode;             // operation kind this cache serves
  file_layout_t dir_layout;

  elist<MDLockCache*>::item item_cap_lock_cache;

  // link myself to locked locks
  std::unique_ptr<LockItem[]> items_lock;

  // link myself to auth-pinned dirfrags
  std::unique_ptr<DirItem[]> items_dir;
  std::vector<CDir*> auth_pinned_dirfrags;

  int ref = 1;
  bool invalidating = false;
};
typedef boost::intrusive_ptr<MutationImpl> MutationRef;
typedef boost::intrusive_ptr<MDRequestImpl> MDRequestRef;
// Stream a mutation by delegating to its virtual print(), so subclasses
// (e.g. MDRequestImpl) render their own description.
inline std::ostream& operator<<(std::ostream &out, const MutationImpl &mut)
{
  mut.print(out);
  return out;
}
#endif
| 15,534 | 27.983209 | 109 | h |
null | ceph-main/src/mds/OpenFileTable.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef OPEN_FILE_TABLE_H
#define OPEN_FILE_TABLE_H
#include "mdstypes.h"
#include "Anchor.h"
#include "MDSContext.h"
class CDir;
class CInode;
class MDSRank;
struct ObjectOperation;
/**
 * OpenFileTable - persistent table of "open" inodes/dirfrags (anchors),
 * stored as omap entries across up to MAX_OBJECTS RADOS objects. Used
 * after MDS restart to prefetch/recover inodes that clients had open,
 * avoiding the need to log an open event for every file.
 */
class OpenFileTable
{
public:
  explicit OpenFileTable(MDSRank *m);
  ~OpenFileTable();

  // Reference tracking: add/remove inodes and dirfrags, which marks the
  // corresponding anchors dirty for the next commit().
  void add_inode(CInode *in);
  void remove_inode(CInode *in);
  void add_dirfrag(CDir *dir);
  void remove_dirfrag(CDir *dir);
  void notify_link(CInode *in);
  void notify_unlink(CInode *in);
  bool is_any_dirty() const { return !dirty_items.empty(); }

  // Persist dirty items; @c fires once the omap writes complete.
  void commit(MDSContext *c, uint64_t log_seq, int op_prio);
  uint64_t get_committed_log_seq() const { return committed_log_seq; }
  bool is_any_committing() const { return num_pending_commit > 0; }

  void load(MDSContext *c);
  bool is_loaded() const { return load_done; }
  void wait_for_load(MDSContext *c) {
    ceph_assert(!load_done);
    waiting_for_load.push_back(c);
  }

  // Prefetch previously-open inodes after load; progresses through the
  // DIR_INODES/DIRFRAGS/FILE_INODES states below.
  bool prefetch_inodes();
  bool is_prefetched() const { return prefetch_state == DONE; }
  void wait_for_prefetch(MDSContext *c) {
    ceph_assert(!is_prefetched());
    waiting_for_prefetch.push_back(c);
  }

  bool should_log_open(CInode *in);

  // Track inos destroyed per log segment so they can be trimmed once the
  // segment expires.
  void note_destroyed_inos(uint64_t seq, const std::vector<inodeno_t>& inos);
  void trim_destroyed_inos(uint64_t seq);

protected:
  friend class C_IO_OFT_Recover;
  friend class C_IO_OFT_Load;
  friend class C_IO_OFT_Save;
  friend class C_IO_OFT_Journal;
  friend class C_OFT_OpenInoFinish;

  uint64_t MAX_ITEMS_PER_OBJ = g_conf().get_val<uint64_t>("osd_deep_scrub_large_omap_object_key_threshold");
  static const unsigned MAX_OBJECTS = 1024; // (1024 * osd_deep_scrub_large_omap_object_key_threshold) items at most

  // special values for dirty_items' state
  static const int DIRTY_NEW	= -1;
  static const int DIRTY_UNDEF	= -2;

  unsigned num_pending_commit = 0;

  void _encode_header(bufferlist& bl, int j_state);
  void _commit_finish(int r, uint64_t log_seq, MDSContext *fin);
  void _journal_finish(int r, uint64_t log_seq, MDSContext *fin,
		       std::map<unsigned, std::vector<ObjectOperation> >& ops);

  void get_ref(CInode *in, frag_t fg=-1U);
  void put_ref(CInode *in, frag_t fg=-1U);

  object_t get_object_name(unsigned idx) const;

  // Reset in-memory omap/journal state (used on recovery/rollback paths).
  void _reset_states() {
    omap_num_objs = 0;
    omap_num_items.resize(0);
    journal_state = JOURNAL_NONE;
    loaded_journals.clear();
    loaded_anchor_map.clear();
  }
  void _read_omap_values(const std::string& key, unsigned idx, bool first);
  void _load_finish(int op_r, int header_r, int values_r,
		    unsigned idx, bool first, bool more,
                    bufferlist &header_bl,
		    std::map<std::string, bufferlist> &values);
  void _recover_finish(int r);

  void _open_ino_finish(inodeno_t ino, int r);
  void _prefetch_inodes();
  void _prefetch_dirfrags();

  void _get_ancestors(const Anchor& parent,
		      std::vector<inode_backpointer_t>& ancestors,
		      mds_rank_t& auth_hint);

  MDSRank *mds;

  version_t omap_version = 0;

  unsigned omap_num_objs = 0;
  std::vector<unsigned> omap_num_items;

  std::map<inodeno_t, OpenedAnchor> anchor_map;

  std::map<inodeno_t, int> dirty_items; // ino -> dirty state
  uint64_t committed_log_seq = 0;
  uint64_t committing_log_seq = 0;

  // two-phase journal for omap updates that span objects
  enum {
    JOURNAL_NONE = 0,
    JOURNAL_START = 1,
    JOURNAL_FINISH = 2,
  };
  int journal_state = 0;

  std::vector<std::map<std::string, bufferlist> > loaded_journals;
  std::map<inodeno_t, RecoveredAnchor> loaded_anchor_map;
  MDSContext::vec waiting_for_load;
  bool load_done = false;

  // prefetch progression (see prefetch_inodes())
  enum {
    DIR_INODES = 1,
    DIRFRAGS = 2,
    FILE_INODES = 3,
    DONE = 4,
  };
  unsigned prefetch_state = 0;
  unsigned num_opening_inodes = 0;
  MDSContext::vec waiting_for_prefetch;

  std::map<uint64_t, std::vector<inodeno_t> > logseg_destroyed_inos;
  std::set<inodeno_t> destroyed_inos_set;

  std::unique_ptr<PerfCounters> logger;
};
#endif
| 4,245 | 26.393548 | 116 | h |
null | ceph-main/src/mds/PurgeQueue.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef PURGE_QUEUE_H_
#define PURGE_QUEUE_H_
#include <utility>

#include "include/compact_set.h"
#include "common/Finisher.h"
#include "mds/MDSMap.h"
#include "osdc/Journaler.h"
/**
* Descriptor of the work associated with purging a file. We record
* the minimal amount of information from the inode such as the size
* and layout: all other un-needed inode metadata (times, permissions, etc)
* has been discarded.
*/
class PurgeItem
{
public:
  enum Action : uint8_t {
    NONE = 0,
    PURGE_FILE = 1,
    TRUNCATE_FILE,
    PURGE_DIR
  };

  PurgeItem() {}

  void encode(bufferlist &bl) const;
  void decode(bufferlist::const_iterator &p);

  // Map an action name string back to its enum value; throws
  // std::out_of_range for unknown names (std::map::at semantics).
  static Action str_to_type(std::string_view str) {
    return PurgeItem::actions.at(std::string(str));
  }

  // Dump for admin-socket/debug output.
  // NOTE(review): stamp, pad_size and old_pools are not included here —
  // presumably intentional, but verify against consumers of this dump.
  void dump(Formatter *f) const
  {
    f->dump_int("action", action);
    f->dump_int("ino", ino);
    f->dump_int("size", size);
    f->open_object_section("layout");
    layout.dump(f);
    f->close_section();
    f->open_object_section("SnapContext");
    snapc.dump(f);
    f->close_section();
    f->open_object_section("fragtree");
    fragtree.dump(f);
    f->close_section();
  }

  std::string_view get_type_str() const;

  utime_t stamp;
  //None PurgeItem serves as NoOp for splicing out journal entries;
  //so there has to be a "pad_size" to specify the size of journal
  //space to be spliced.
  uint32_t pad_size = 0;
  Action action = NONE;
  inodeno_t ino = 0;
  uint64_t size = 0;
  file_layout_t layout;
  std::vector<int64_t> old_pools;
  SnapContext snapc;
  fragtree_t fragtree;

private:
  // action-name -> Action lookup used by str_to_type()
  static const std::map<std::string, PurgeItem::Action> actions;
};
WRITE_CLASS_ENCODER(PurgeItem)
// Performance counter indices for the PurgeQueue.
enum {
  l_pq_first = 3500,
  // Purge work currently executing, plus the high-water marks observed
  // for in-flight ops and items.
  l_pq_executing_ops,
  l_pq_executing_ops_high_water,
  l_pq_executing,
  l_pq_executing_high_water,
  // How many items have been finished by PurgeQueue
  l_pq_executed,
  // Items recorded in the purge journal — presumably those not yet
  // executed; confirm against the counter registration in the .cc.
  l_pq_item_in_journal,
  l_pq_last
};
/**
 * One object-store operation to be submitted on behalf of a PurgeItem.
 * PurgeType names suggest a range punch, a whole-object remove, or a
 * zero — confirm against the submission code in the .cc.
 * The second constructor additionally pins an explicit object/locator.
 */
struct PurgeItemCommitOp {
public:
  enum PurgeType : uint8_t {
    PURGE_OP_RANGE = 0,
    PURGE_OP_REMOVE = 1,
    PURGE_OP_ZERO
  };
  // Sink parameters: callers pass by value, we move into the members to
  // avoid a second copy of the (non-trivial) PurgeItem/object ids.
  PurgeItemCommitOp(PurgeItem _item, PurgeType _type, int _flags)
    : item(std::move(_item)), type(_type), flags(_flags) {}
  PurgeItemCommitOp(PurgeItem _item, PurgeType _type, int _flags,
                    object_t _oid, object_locator_t _oloc)
    : item(std::move(_item)), type(_type), flags(_flags),
      oid(std::move(_oid)), oloc(std::move(_oloc)) {}
  PurgeItem item;
  PurgeType type;
  int flags;
  object_t oid;
  object_locator_t oloc;
};
/**
* A persistent queue of PurgeItems. This class both writes and reads
* to the queue. There is one of these per MDS rank.
*
* Note that this class does not take a reference to MDSRank: we are
* independent of all the metadata structures and do not need to
* take mds_lock for anything.
*/
class PurgeQueue
{
public:
  PurgeQueue(
      CephContext *cct_,
      mds_rank_t rank_,
      const int64_t metadata_pool_,
      Objecter *objecter_,
      Context *on_error);
  ~PurgeQueue();
  void init();
  void activate();
  void shutdown();
  void create_logger();
  // Write an empty queue, use this during MDS rank creation
  void create(Context *completion);
  // Read the Journaler header for an existing queue and start consuming
  void open(Context *completion);
  // Register a callback to run once recovery of the journal completes.
  void wait_for_recovery(Context *c);
  // Submit one entry to the work queue.  Call back when it is persisted
  // to the queue (there is no callback for when it is executed)
  void push(const PurgeItem &pi, Context *completion);
  // Submit the prepared object-store ops for one item; r is the status of
  // the preceding step, expire_to is the journal offset that may be expired
  // once the ops complete.
  void _commit_ops(int r, const std::vector<PurgeItemCommitOp>& ops_vec, uint64_t expire_to);
  // If the on-disk queue is empty and we are not currently processing
  // anything.
  bool is_idle() const;
  /**
   * Signal to the PurgeQueue that you would like it to hurry up and
   * finish consuming everything in the queue.  Provides progress
   * feedback.
   *
   * @param progress: bytes consumed since we started draining
   * @param progress_total: max bytes that were outstanding during purge
   * @param in_flight_count: number of file purges currently in flight
   *
   * @returns true if drain is complete
   */
  bool drain(
    uint64_t *progress,
    uint64_t *progress_total,
    size_t *in_flight_count);
  // Recompute max_purge_ops from the current MDSMap (e.g. on map changes).
  void update_op_limit(const MDSMap &mds_map);
  void handle_conf_change(const std::set<std::string>& changed, const MDSMap& mds_map);
private:
  // Number of object-store ops needed to execute one item.
  uint32_t _calculate_ops(const PurgeItem &item) const;
  // Whether throttle limits currently allow consuming another item.
  bool _can_consume();
  // recover the journal write_pos (drop any partial written entry)
  void _recover();
  /**
   * @return true if we were in a position to try and consume something:
   *         does not mean we necessarily did.
   */
  bool _consume();
  // Kick off execution of one decoded item; expire_to is its journal offset.
  void _execute_item(const PurgeItem &item, uint64_t expire_to);
  void _execute_item_complete(uint64_t expire_to);
  // Enter read-only mode after an unrecoverable error r.
  void _go_readonly(int r);
  CephContext *cct;
  const mds_rank_t rank;
  ceph::mutex lock = ceph::make_mutex("PurgeQueue");
  bool readonly = false;
  int64_t metadata_pool;
  // Don't use the MDSDaemon's Finisher and Timer, because this class
  // operates outside of MDSDaemon::mds_lock
  Finisher finisher;
  SafeTimer timer;
  Filer filer;
  Objecter *objecter;
  std::unique_ptr<PerfCounters> logger;
  Journaler journaler;
  Context *on_error;
  // Map of Journaler offset to PurgeItem
  std::map<uint64_t, PurgeItem> in_flight;
  // Journal offsets whose expiry is pending.
  std::set<uint64_t> pending_expire;
  // Throttled allowances
  uint64_t ops_in_flight = 0;
  // Dynamic op limit per MDS based on PG count
  uint64_t max_purge_ops = 0;
  // How many bytes were remaining when drain() was first called,
  // used for indicating progress.
  uint64_t drain_initial = 0;
  // Has drain() ever been called on this instance?
  bool draining = false;
  // Do we currently have a flush timer event waiting?
  Context *delayed_flush = nullptr;
  bool recovered = false;
  std::vector<Context*> waiting_for_recovery;
  size_t purge_item_journal_size;
  // High-water marks reported via the l_pq_*_high_water counters.
  uint64_t ops_high_water = 0;
  uint64_t files_high_water = 0;
};
#endif
| 6,369 | 24.48 | 93 | h |
null | ceph-main/src/mds/RecoveryQueue.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
//class C_MDC_Recover;
//
#ifndef RECOVERY_QUEUE_H
#define RECOVERY_QUEUE_H
#include <set>
#include "include/common_fwd.h"
#include "osdc/Filer.h"
class CInode;
class MDSRank;
// Queue of inodes whose file state (size/mtime) must be recovered by
// probing the object store; maintains a normal and a high-priority list.
class RecoveryQueue {
public:
  explicit RecoveryQueue(MDSRank *mds_);
  void enqueue(CInode *in);         ///< add inode to the recovery queue
  void advance();                   ///< start more recoveries if possible
  void prioritize(CInode *in);      ///< do this inode now/soon
  void set_logger(PerfCounters *p) {logger=p;}
private:
  friend class C_MDC_Recover;
  void _start(CInode *in);  ///< start recovering this file
  ///< completion callback: r is the probe status, size/mtime the results
  void _recovered(CInode *in, int r, uint64_t size, utime_t mtime);
  // cached sizes of the two elists below (elist has no O(1) size())
  size_t file_recover_queue_size = 0;
  size_t file_recover_queue_front_size = 0;
  elist<CInode*> file_recover_queue;   ///< the queue
  elist<CInode*> file_recover_queue_front;  ///< elevated priority items
  std::map<CInode*, bool> file_recovering; // inode -> need_restart
  MDSRank *mds;
  PerfCounters *logger = nullptr;
  Filer filer;
};
#endif // RECOVERY_QUEUE_H
| 1,380 | 23.22807 | 72 | h |
null | ceph-main/src/mds/ScatterLock.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_SCATTERLOCK_H
#define CEPH_SCATTERLOCK_H
#include "SimpleLock.h"
#include "MDSContext.h"
// A SimpleLock that additionally tracks scatter/unscatter intent and a
// dirty -> flushing -> flushed lifecycle for scattered metadata.  Extra
// per-lock state (update stamp, dirty-list membership) is allocated lazily
// in more_bits_t only while needed.
class ScatterLock : public SimpleLock {
public:
  ScatterLock(MDSCacheObject *o, LockType *lt) :
    SimpleLock(o, lt) {}
  ~ScatterLock() override {
    ceph_assert(!_more);
  }
  bool is_scatterlock() const override {
    return true;
  }
  // Stricter than SimpleLock: also requires no pending dirty/flush state.
  bool is_sync_and_unlocked() const {
    return
      SimpleLock::is_sync_and_unlocked() &&
      !is_dirty() &&
      !is_flushing();
  }
  bool can_scatter_pin(client_t loner) {
    /*
      LOCK : NOT okay because it can MIX and force replicas to journal something
      TSYN : also not okay for same reason
      EXCL : also not okay
      MIX : okay, replica can stall before sending AC_SYNCACK
      SYNC : okay, replica can stall before sending AC_MIXACK or AC_LOCKACK
    */
    return
      get_state() == LOCK_SYNC ||
      get_state() == LOCK_MIX;
  }
  // Move an xlocked IFILE lock to XLOCKSNAP and queue c until stable.
  void set_xlock_snap_sync(MDSContext *c)
  {
    ceph_assert(get_type() == CEPH_LOCK_IFILE);
    ceph_assert(state == LOCK_XLOCK || state == LOCK_XLOCKDONE);
    state = LOCK_XLOCKSNAP;
    add_waiter(WAIT_STABLE, c);
  }
  // Note: allocates more_bits_t if not already present.
  xlist<ScatterLock*>::item *get_updated_item() { return &more()->item_updated; }
  utime_t get_update_stamp() {
    return _more ? _more->update_stamp : utime_t();
  }
  void set_update_stamp(utime_t t) { more()->update_stamp = t; }
  void set_scatter_wanted() {
    state_flags |= SCATTER_WANTED;
  }
  void set_unscatter_wanted() {
    state_flags |= UNSCATTER_WANTED;
  }
  void clear_scatter_wanted() {
    state_flags &= ~SCATTER_WANTED;
  }
  void clear_unscatter_wanted() {
    state_flags &= ~UNSCATTER_WANTED;
  }
  bool get_scatter_wanted() const {
    return state_flags & SCATTER_WANTED;
  }
  bool get_unscatter_wanted() const {
    return state_flags & UNSCATTER_WANTED;
  }
  bool is_dirty() const override {
    return state_flags & DIRTY;
  }
  bool is_flushing() const override {
    return state_flags & FLUSHING;
  }
  bool is_flushed() const override {
    return state_flags & FLUSHED;
  }
  bool is_dirty_or_flushing() const {
    return is_dirty() || is_flushing();
  }
  // First transition into dirty-or-flushing pins the parent object.
  void mark_dirty() {
    if (!is_dirty()) {
      if (!is_flushing())
	parent->get(MDSCacheObject::PIN_DIRTYSCATTERED);
      set_dirty();
    }
  }
  // dirty -> flushing (no-op if not dirty).
  void start_flush() {
    if (is_dirty()) {
      set_flushing();
      clear_dirty();
    }
  }
  // flushing -> flushed; drops the parent pin unless re-dirtied meanwhile.
  void finish_flush() {
    if (is_flushing()) {
      clear_flushing();
      set_flushed();
      if (!is_dirty()) {
	parent->put(MDSCacheObject::PIN_DIRTYSCATTERED);
	parent->clear_dirty_scattered(get_type());
      }
    }
  }
  void clear_flushed() override {
    state_flags &= ~FLUSHED;
  }
  // Discard all dirty/flush state by driving the full cycle to completion.
  void remove_dirty() {
    start_flush();
    finish_flush();
    clear_flushed();
  }
  void infer_state_from_strong_rejoin(int rstate, bool locktoo) {
    if (rstate == LOCK_MIX ||
	rstate == LOCK_MIX_LOCK || // replica still has wrlocks?
	rstate == LOCK_MIX_SYNC)
      state = LOCK_MIX;
    else if (locktoo && rstate == LOCK_LOCK)
      state = LOCK_LOCK;
  }
  void encode_state_for_rejoin(ceph::buffer::list& bl, int rep) {
    __s16 s = get_replica_state();
    if (is_gathering(rep)) {
      // the recovering mds may hold rejoined wrlocks
      if (state == LOCK_MIX_SYNC)
	s = LOCK_MIX_SYNC;
      else
	s = LOCK_MIX_LOCK;
    }
    // If there is a recovering mds who replcated an object when it failed
    // and scatterlock in the object was in MIX state, It's possible that
    // the recovering mds needs to take wrlock on the scatterlock when it
    // replays unsafe requests. So this mds should delay taking rdlock on
    // the scatterlock until the recovering mds finishes replaying unsafe.
    // Otherwise unsafe requests may get replayed after current request.
    //
    // For example:
    // The recovering mds is auth mds of a dirfrag, this mds is auth mds
    // of corresponding inode. when 'rm -rf' the direcotry, this mds should
    // delay the rmdir request until the recovering mds has replayed unlink
    // requests.
    if (s == LOCK_MIX || s == LOCK_MIX_LOCK || s == LOCK_MIX_SYNC)
      mark_need_recover();
    using ceph::encode;
    encode(s, bl);
  }
  void decode_state_rejoin(ceph::buffer::list::const_iterator& p, MDSContext::vec& waiters, bool survivor) {
    SimpleLock::decode_state_rejoin(p, waiters, survivor);
    // an interrupted flush reverts to dirty so it will be flushed again
    if (is_flushing()) {
      set_dirty();
      clear_flushing();
    }
  }
  bool remove_replica(int from, bool rejoin) {
    if (rejoin &&
	(state == LOCK_MIX ||
	 state == LOCK_MIX_SYNC ||
	 state == LOCK_MIX_LOCK2 ||
	 state == LOCK_MIX_TSYN ||
	 state == LOCK_MIX_EXCL))
      return false;
    return SimpleLock::remove_replica(from);
  }
  void print(std::ostream& out) const override {
    out << "(";
    _print(out);
    if (is_dirty())
      out << " dirty";
    if (is_flushing())
      out << " flushing";
    if (is_flushed())
      out << " flushed";
    if (get_scatter_wanted())
      out << " scatter_wanted";
    out << ")";
  }
private:
  // Lazily-allocated extras: membership in the "updated" list and the
  // timestamp of the last update.
  struct more_bits_t {
    xlist<ScatterLock*>::item item_updated;
    utime_t update_stamp;
    explicit more_bits_t(ScatterLock *lock) :
      item_updated(lock)
    {}
  };
  more_bits_t *more() {
    if (!_more)
      _more.reset(new more_bits_t(this));
    return _more.get();
  }
  // Flag bits layered above SimpleLock's use of state_flags (bits 8..12).
  enum {
    SCATTER_WANTED   = 1 << 8,
    UNSCATTER_WANTED = 1 << 9,
    DIRTY            = 1 << 10,
    FLUSHING         = 1 << 11,
    FLUSHED          = 1 << 12,
  };
  void set_flushing() {
    state_flags |= FLUSHING;
  }
  void clear_flushing() {
    state_flags &= ~FLUSHING;
  }
  void set_flushed() {
    state_flags |= FLUSHED;
  }
  void set_dirty() {
    state_flags |= DIRTY;
  }
  // Clearing dirty also detaches from the updated list and releases _more.
  void clear_dirty() {
    state_flags &= ~DIRTY;
    if (_more) {
      _more->item_updated.remove_myself();
      _more.reset();
    }
  }
  mutable std::unique_ptr<more_bits_t> _more;
};
#endif
| 6,337 | 23.854902 | 108 | h |
null | ceph-main/src/mds/ScrubHeader.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Red Hat Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef SCRUB_HEADER_H_
#define SCRUB_HEADER_H_
#include <memory>
#include <string>
#include <string_view>
#include "include/ceph_assert.h"
namespace ceph {
class Formatter;
};
class CInode;
/**
* Externally input parameters for a scrub, associated with the root
* of where we are doing a recursive scrub
*/
class ScrubHeader {
public:
  ScrubHeader(std::string_view tag_, bool is_tag_internal_, bool force_,
              bool recursive_, bool repair_, bool scrub_mdsdir_ = false)
    : tag(tag_), is_tag_internal(is_tag_internal_), force(force_),
      recursive(recursive_), repair(repair_), scrub_mdsdir(scrub_mdsdir_) {}
  // Set after construction because it won't be known until we've
  // started resolving path and locking
  void set_origin(inodeno_t ino) { origin = ino; }
  bool get_recursive() const { return recursive; }
  bool get_repair() const { return repair; }
  bool get_force() const { return force; }
  bool get_scrub_mdsdir() const { return scrub_mdsdir; }
  bool is_internal_tag() const { return is_tag_internal; }
  inodeno_t get_origin() const { return origin; }
  const std::string& get_tag() const { return tag; }
  // Whether any repair was actually performed during this scrub.
  bool get_repaired() const { return repaired; }
  void set_repaired() { repaired = true; }
  // Epoch bookkeeping for forwarded scrub requests.
  void set_epoch_last_forwarded(unsigned epoch) { epoch_last_forwarded = epoch; }
  unsigned get_epoch_last_forwarded() const { return epoch_last_forwarded; }
  // Count of in-flight operations attributed to this scrub.
  void inc_num_pending() { ++num_pending; }
  void dec_num_pending() {
    ceph_assert(num_pending > 0);
    --num_pending;
  }
  unsigned get_num_pending() const { return num_pending; }
protected:
  // Scrub parameters, fixed at construction.  is_tag_internal is now const
  // for consistency with its sibling flags; the class was already
  // non-assignable due to the other const members.
  const std::string tag;
  const bool is_tag_internal;
  const bool force;
  const bool recursive;
  const bool repair;
  const bool scrub_mdsdir;
  inodeno_t origin;
  bool repaired = false;  // May be set during scrub if repairs happened
  unsigned epoch_last_forwarded = 0;
  unsigned num_pending = 0;
};
typedef std::shared_ptr<ScrubHeader> ScrubHeaderRef;
typedef std::shared_ptr<const ScrubHeader> ScrubHeaderRefConst;
#endif // SCRUB_HEADER_H_
| 2,457 | 27.917647 | 81 | h |
null | ceph-main/src/mds/ScrubStack.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef SCRUBSTACK_H_
#define SCRUBSTACK_H_
#include "CDir.h"
#include "CDentry.h"
#include "CInode.h"
#include "MDSContext.h"
#include "ScrubHeader.h"
#include "common/LogClient.h"
#include "include/elist.h"
#include "messages/MMDSScrub.h"
#include "messages/MMDSScrubStats.h"
class MDCache;
class Finisher;
class ScrubStack {
public:
  ScrubStack(MDCache *mdc, LogChannelRef &clog, Finisher *finisher_) :
    mdcache(mdc),
    clog(clog),
    finisher(finisher_),
    scrub_stack(member_offset(MDSCacheObject, item_scrub)),
    scrub_waiting(member_offset(MDSCacheObject, item_scrub)) {}
  ~ScrubStack() {
    ceph_assert(scrub_stack.empty());
    ceph_assert(!scrubs_in_progress);
  }
  /**
   * Put the inode at either the top or bottom of the stack, with the
   * given scrub params, and kick off more scrubbing.
   * @param in The inode to scrub
   * @param header The ScrubHeader propagated from wherever this scrub
   */
  int enqueue(CInode *in, ScrubHeaderRef& header, bool top);
  /**
   * Abort an ongoing scrub operation. The abort operation could be
   * delayed if there are in-progress scrub operations on going. The
   * caller should provide a context which is completed after all
   * in-progress scrub operations are completed and pending inodes
   * are removed from the scrub stack (with the context callbacks for
   * inodes completed with -CEPHFS_ECANCELED).
   * @param on_finish Context callback to invoke after abort
   */
  void scrub_abort(Context *on_finish);
  /**
   * Pause scrub operations. Similar to abort, pause is delayed if
   * there are in-progress scrub operations on going. The caller
   * should provide a context which is completed after all in-progress
   * scrub operations are completed. Subsequent scrub operations are
   * queued until scrub is resumed.
   * @param on_finish Context callback to invoke after pause
   */
  void scrub_pause(Context *on_finish);
  /**
   * Resume a paused scrub. Unlike abort or pause, this is instantaneous.
   * Pending pause operations are cancelled (context callbacks are
   * invoked with -CEPHFS_ECANCELED).
   * @returns 0 (success) if resumed, -CEPHFS_EINVAL if an abort is in-progress.
   */
  bool scrub_resume();
  /**
   * Get the current scrub status as human readable string. Some basic
   * information is returned such as number of inodes pending abort/pause.
   */
  void scrub_status(Formatter *f);
  /**
   * Get a high level scrub status summary such as current scrub state
   * and scrub paths.
   */
  std::string_view scrub_summary();
  static bool is_idle(std::string_view state_str) {
    return state_str == "idle";
  }
  bool is_scrubbing() const { return !scrub_stack.empty(); }
  // Advance the scrub epoch and exchange stats with peer MDSs.
  void advance_scrub_status();
  // Clean up per-rank scrub state when an MDS rank fails.
  void handle_mds_failure(mds_rank_t mds);
  // Entry point for MMDSScrub / MMDSScrubStats messages.
  void dispatch(const cref_t<Message> &m);
  // Returns true (and dequeues) iff the inode was on the stack.
  bool remove_inode_if_stacked(CInode *in);
  MDCache *mdcache;
protected:
  // reference to global cluster log client
  LogChannelRef &clog;
  /// A finisher needed so that we don't re-enter kick_off_scrubs
  Finisher *finisher;
  /// The stack of inodes we want to scrub
  elist<MDSCacheObject*> scrub_stack;
  /// Objects that cannot be scrubbed yet (see add_to_waiting)
  elist<MDSCacheObject*> scrub_waiting;
  /// current number of dentries we're actually scrubbing
  int scrubs_in_progress = 0;
  int stack_size = 0;
  /// State of a scrub forwarded to remote (auth) MDS ranks
  struct scrub_remote_t {
    std::string tag;
    std::set<mds_rank_t> gather_set;
  };
  std::map<CInode*, scrub_remote_t> remote_scrubs;
  unsigned scrub_epoch = 2;
  unsigned scrub_epoch_fully_acked = 0;
  unsigned scrub_epoch_last_abort = 2;
  // check if any mds is aborting scrub after mds.0 starts
  bool scrub_any_peer_aborting = true;
  /// Per-rank stats gathered via MMDSScrubStats
  struct scrub_stat_t {
    unsigned epoch_acked = 0;
    std::set<std::string> scrubbing_tags;
    bool aborting = false;
  };
  std::vector<scrub_stat_t> mds_scrub_stats;
  std::map<std::string, ScrubHeaderRef> scrubbing_map;
  friend class C_RetryScrub;
private:
  // scrub abort is _not_ a state, rather it's an operation that's
  // performed after in-progress scrubs are finished.
  enum State {
    STATE_RUNNING = 0,
    STATE_IDLE,
    STATE_PAUSING,
    STATE_PAUSED,
  };
  friend std::ostream &operator<<(std::ostream &os, const State &state);
  friend class C_InodeValidated;
  int _enqueue(MDSCacheObject *obj, ScrubHeaderRef& header, bool top);
  /**
   * Remove the inode/dirfrag from the stack.
   */
  inline void dequeue(MDSCacheObject *obj);
  /**
   * Kick off as many scrubs as are appropriate, based on the current
   * state of the stack.
   */
  void kick_off_scrubs();
  /**
   * Move the inode/dirfrag that can't be scrubbed immediately
   * from scrub queue to waiting list.
   */
  void add_to_waiting(MDSCacheObject *obj);
  /**
   * Move the inode/dirfrag back to scrub queue.
   */
  void remove_from_waiting(MDSCacheObject *obj, bool kick=true);
  /**
   * Validate authority of the inode. If current mds is not auth of the inode,
   * forword scrub to auth mds.
   */
  bool validate_inode_auth(CInode *in);
  /**
   * Scrub a file inode.
   * @param in The inode to scrub
   */
  void scrub_file_inode(CInode *in);
  /**
   * Callback from completion of CInode::validate_disk_state
   * @param in The inode we were validating
   * @param r The return status from validate_disk_state
   * @param result Populated results from validate_disk_state
   */
  void _validate_inode_done(CInode *in, int r,
			    const CInode::validated_data &result);
  /**
   * Scrub a directory inode. It queues child dirfrags, then does
   * final scrub of the inode.
   *
   * @param in The directory indoe to scrub
   * @param added_children set to true if we pushed some of our children
   * @param done set to true if we started to do final scrub
   */
  void scrub_dir_inode(CInode *in, bool *added_children, bool *done);
  /**
   * Scrub a dirfrag. It queues child dentries, then does final
   * scrub of the dirfrag.
   *
   * @param dir The dirfrag to scrub (must be auth)
   * @param done set to true if we started to do final scrub
   */
  void scrub_dirfrag(CDir *dir, bool *done);
  /**
   * Scrub a directory-representing dentry.
   *
   * @param in The directory inode we're doing final scrub on.
   */
  void scrub_dir_inode_final(CInode *in);
  /**
   * Set scrub state
   * @param next_state State to move the scrub to.
   */
  void set_state(State next_state);
  /**
   * Is scrub in one of transition states (running, pausing)
   */
  bool scrub_in_transition_state();
  /**
   * complete queued up contexts
   * @param r return value to complete contexts.
   */
  void complete_control_contexts(int r);
  /**
   * ask peer mds (rank > 0) to abort/pause/resume scrubs
   */
  void send_state_message(int op);
  /**
   * Abort pending scrubs for inodes waiting in the inode stack.
   * Completion context is complete with -CEPHFS_ECANCELED.
   */
  void abort_pending_scrubs();
  /**
   * Return path for a given inode ("/" for an empty path).
   * @param in inode to make path entry.
   */
  std::string scrub_inode_path(CInode *in) {
    std::string path;
    in->make_path_string(path, true);
    if (path.empty())
      return "/";
    // return the string directly instead of re-constructing it from
    // c_str() (the old ternary forced an extra allocation + copy)
    return path;
  }
  /**
   * Send scrub information (queued/finished scrub path and summary)
   * to cluster log.
   * @param in inode for which scrub has been queued or finished.
   */
  void clog_scrub_summary(CInode *in=nullptr);
  void handle_scrub(const cref_t<MMDSScrub> &m);
  void handle_scrub_stats(const cref_t<MMDSScrubStats> &m);
  State state = STATE_IDLE;
  bool clear_stack = false;
  // list of pending context completions for asynchronous scrub
  // control operations.
  std::vector<Context *> control_ctxs;
};
#endif /* SCRUBSTACK_H_ */
| 8,046 | 27.739286 | 80 | h |
null | ceph-main/src/mds/Server.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_SERVER_H
#define CEPH_MDS_SERVER_H
#include <string_view>
#include <common/DecayCounter.h>
#include "include/common_fwd.h"
#include "messages/MClientReconnect.h"
#include "messages/MClientReply.h"
#include "messages/MClientRequest.h"
#include "messages/MClientSession.h"
#include "messages/MClientSnap.h"
#include "messages/MClientReclaim.h"
#include "messages/MClientReclaimReply.h"
#include "messages/MLock.h"
#include "CInode.h"
#include "MDSRank.h"
#include "Mutation.h"
#include "MDSContext.h"
class OSDMap;
class LogEvent;
class EMetaBlob;
class EUpdate;
class MDLog;
struct SnapInfo;
class MetricsHandler;
// Performance counter indices for the Server (MDS client request handling).
enum {
  l_mdss_first = 1000,
  // dispatch/handle counts per message class
  l_mdss_dispatch_client_request,
  l_mdss_dispatch_peer_request,
  l_mdss_handle_client_request,
  l_mdss_handle_client_session,
  l_mdss_handle_peer_request,
  // per-operation request latency counters
  l_mdss_req_create_latency,
  l_mdss_req_getattr_latency,
  l_mdss_req_getfilelock_latency,
  l_mdss_req_link_latency,
  l_mdss_req_lookup_latency,
  l_mdss_req_lookuphash_latency,
  l_mdss_req_lookupino_latency,
  l_mdss_req_lookupname_latency,
  l_mdss_req_lookupparent_latency,
  l_mdss_req_lookupsnap_latency,
  l_mdss_req_lssnap_latency,
  l_mdss_req_mkdir_latency,
  l_mdss_req_mknod_latency,
  l_mdss_req_mksnap_latency,
  l_mdss_req_open_latency,
  l_mdss_req_readdir_latency,
  l_mdss_req_rename_latency,
  l_mdss_req_renamesnap_latency,
  l_mdss_req_snapdiff_latency,
  l_mdss_req_rmdir_latency,
  l_mdss_req_rmsnap_latency,
  l_mdss_req_rmxattr_latency,
  l_mdss_req_setattr_latency,
  l_mdss_req_setdirlayout_latency,
  l_mdss_req_setfilelock_latency,
  l_mdss_req_setlayout_latency,
  l_mdss_req_setxattr_latency,
  l_mdss_req_symlink_latency,
  l_mdss_req_unlink_latency,
  // cap-related events
  l_mdss_cap_revoke_eviction,
  l_mdss_cap_acquisition_throttle,
  l_mdss_req_getvxattr_latency,
  l_mdss_last,
};
class Server {
public:
using clock = ceph::coarse_mono_clock;
using time = ceph::coarse_mono_time;
enum class RecallFlags : uint64_t {
NONE = 0,
STEADY = (1<<0),
ENFORCE_MAX = (1<<1),
TRIM = (1<<2),
ENFORCE_LIVENESS = (1<<3),
};
explicit Server(MDSRank *m, MetricsHandler *metrics_handler);
~Server() {
g_ceph_context->get_perfcounters_collection()->remove(logger);
delete logger;
delete reconnect_done;
}
void create_logger();
// message handler
void dispatch(const cref_t<Message> &m);
void handle_osd_map();
// -- sessions and recovery --
bool waiting_for_reconnect(client_t c) const;
void dump_reconnect_status(Formatter *f) const;
time last_recalled() const {
return last_recall_state;
}
void handle_client_session(const cref_t<MClientSession> &m);
void _session_logged(Session *session, uint64_t state_seq, bool open, version_t pv,
const interval_set<inodeno_t>& inos_to_free, version_t piv,
const interval_set<inodeno_t>& inos_to_purge, LogSegment *ls);
version_t prepare_force_open_sessions(std::map<client_t,entity_inst_t> &cm,
std::map<client_t,client_metadata_t>& cmm,
std::map<client_t,std::pair<Session*,uint64_t> >& smap);
void finish_force_open_sessions(const std::map<client_t,std::pair<Session*,uint64_t> >& smap,
bool dec_import=true);
void flush_client_sessions(std::set<client_t>& client_set, MDSGatherBuilder& gather);
void finish_flush_session(Session *session, version_t seq);
void terminate_sessions();
void find_idle_sessions();
void kill_session(Session *session, Context *on_safe);
size_t apply_blocklist();
void journal_close_session(Session *session, int state, Context *on_safe);
size_t get_num_pending_reclaim() const { return client_reclaim_gather.size(); }
Session *find_session_by_uuid(std::string_view uuid);
void reclaim_session(Session *session, const cref_t<MClientReclaim> &m);
void finish_reclaim_session(Session *session, const ref_t<MClientReclaimReply> &reply=nullptr);
void handle_client_reclaim(const cref_t<MClientReclaim> &m);
void reconnect_clients(MDSContext *reconnect_done_);
void handle_client_reconnect(const cref_t<MClientReconnect> &m);
void infer_supported_features(Session *session, client_metadata_t& client_metadata);
void update_required_client_features();
//void process_reconnect_cap(CInode *in, int from, ceph_mds_cap_reconnect& capinfo);
void reconnect_gather_finish();
void reconnect_tick();
void recover_filelocks(CInode *in, bufferlist locks, int64_t client);
std::pair<bool, uint64_t> recall_client_state(MDSGatherBuilder* gather, RecallFlags=RecallFlags::NONE);
void force_clients_readonly();
// -- requests --
void handle_client_request(const cref_t<MClientRequest> &m);
void handle_client_reply(const cref_t<MClientReply> &m);
void journal_and_reply(MDRequestRef& mdr, CInode *tracei, CDentry *tracedn,
LogEvent *le, MDSLogContextBase *fin);
void submit_mdlog_entry(LogEvent *le, MDSLogContextBase *fin,
MDRequestRef& mdr, std::string_view event);
void dispatch_client_request(MDRequestRef& mdr);
void perf_gather_op_latency(const cref_t<MClientRequest> &req, utime_t lat);
void early_reply(MDRequestRef& mdr, CInode *tracei, CDentry *tracedn);
void respond_to_request(MDRequestRef& mdr, int r = 0);
void set_trace_dist(const ref_t<MClientReply> &reply, CInode *in, CDentry *dn,
MDRequestRef& mdr);
void handle_peer_request(const cref_t<MMDSPeerRequest> &m);
void handle_peer_request_reply(const cref_t<MMDSPeerRequest> &m);
void dispatch_peer_request(MDRequestRef& mdr);
void handle_peer_auth_pin(MDRequestRef& mdr);
void handle_peer_auth_pin_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &ack);
// some helpers
bool check_fragment_space(MDRequestRef& mdr, CDir *in);
bool check_dir_max_entries(MDRequestRef& mdr, CDir *in);
bool check_access(MDRequestRef& mdr, CInode *in, unsigned mask);
bool _check_access(Session *session, CInode *in, unsigned mask, int caller_uid, int caller_gid, int setattr_uid, int setattr_gid);
CDentry *prepare_stray_dentry(MDRequestRef& mdr, CInode *in);
CInode* prepare_new_inode(MDRequestRef& mdr, CDir *dir, inodeno_t useino, unsigned mode,
const file_layout_t *layout=nullptr);
void journal_allocated_inos(MDRequestRef& mdr, EMetaBlob *blob);
void apply_allocated_inos(MDRequestRef& mdr, Session *session);
void _try_open_ino(MDRequestRef& mdr, int r, inodeno_t ino);
CInode* rdlock_path_pin_ref(MDRequestRef& mdr, bool want_auth,
bool no_want_auth=false);
CDentry* rdlock_path_xlock_dentry(MDRequestRef& mdr, bool create,
bool okexist=false, bool authexist=false,
bool want_layout=false);
std::pair<CDentry*, CDentry*>
rdlock_two_paths_xlock_destdn(MDRequestRef& mdr, bool xlock_srcdn);
CDir* try_open_auth_dirfrag(CInode *diri, frag_t fg, MDRequestRef& mdr);
// requests on existing inodes.
void handle_client_getattr(MDRequestRef& mdr, bool is_lookup);
void handle_client_lookup_ino(MDRequestRef& mdr,
bool want_parent, bool want_dentry);
void _lookup_snap_ino(MDRequestRef& mdr);
void _lookup_ino_2(MDRequestRef& mdr, int r);
void handle_client_readdir(MDRequestRef& mdr);
void handle_client_file_setlock(MDRequestRef& mdr);
void handle_client_file_readlock(MDRequestRef& mdr);
bool xlock_policylock(MDRequestRef& mdr, CInode *in,
bool want_layout=false, bool xlock_snaplock=false);
CInode* try_get_auth_inode(MDRequestRef& mdr, inodeno_t ino);
void handle_client_setattr(MDRequestRef& mdr);
void handle_client_setlayout(MDRequestRef& mdr);
void handle_client_setdirlayout(MDRequestRef& mdr);
int parse_quota_vxattr(std::string name, std::string value, quota_info_t *quota);
void create_quota_realm(CInode *in);
int parse_layout_vxattr_json(std::string name, std::string value,
const OSDMap& osdmap, file_layout_t *layout);
int parse_layout_vxattr_string(std::string name, std::string value, const OSDMap& osdmap,
file_layout_t *layout);
int parse_layout_vxattr(std::string name, std::string value, const OSDMap& osdmap,
file_layout_t *layout, bool validate=true);
int check_layout_vxattr(MDRequestRef& mdr,
std::string name,
std::string value,
file_layout_t *layout);
void handle_set_vxattr(MDRequestRef& mdr, CInode *cur);
void handle_remove_vxattr(MDRequestRef& mdr, CInode *cur);
void handle_client_getvxattr(MDRequestRef& mdr);
void handle_client_setxattr(MDRequestRef& mdr);
void handle_client_removexattr(MDRequestRef& mdr);
void handle_client_fsync(MDRequestRef& mdr);
bool is_unlink_pending(CDentry *dn);
void wait_for_pending_unlink(CDentry *dn, MDRequestRef& mdr);
bool is_reintegrate_pending(CDentry *dn);
void wait_for_pending_reintegrate(CDentry *dn, MDRequestRef& mdr);
// open
void handle_client_open(MDRequestRef& mdr);
void handle_client_openc(MDRequestRef& mdr); // O_CREAT variant.
void do_open_truncate(MDRequestRef& mdr, int cmode); // O_TRUNC variant.
// namespace changes
void handle_client_mknod(MDRequestRef& mdr);
  void handle_client_mkdir(MDRequestRef& mdr);
  void handle_client_symlink(MDRequestRef& mdr);
  // link
  void handle_client_link(MDRequestRef& mdr);
  void _link_local(MDRequestRef& mdr, CDentry *dn, CInode *targeti, SnapRealm *target_realm);
  void _link_local_finish(MDRequestRef& mdr, CDentry *dn, CInode *targeti,
			  version_t, version_t, bool);
  void _link_remote(MDRequestRef& mdr, bool inc, CDentry *dn, CInode *targeti);
  void _link_remote_finish(MDRequestRef& mdr, bool inc, CDentry *dn, CInode *targeti,
			   version_t);
  // peer-side link prepare/commit and rollback handling
  void handle_peer_link_prep(MDRequestRef& mdr);
  void _logged_peer_link(MDRequestRef& mdr, CInode *targeti, bool adjust_realm);
  void _commit_peer_link(MDRequestRef& mdr, int r, CInode *targeti);
  void _committed_peer(MDRequestRef& mdr);  // use for rename, too
  void handle_peer_link_prep_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &m);
  void do_link_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& mdr);
  void _link_rollback_finish(MutationRef& mut, MDRequestRef& mdr,
			     std::map<client_t,ref_t<MClientSnap>>& split);
  // unlink
  void handle_client_unlink(MDRequestRef& mdr);
  bool _dir_is_nonempty_unlocked(MDRequestRef& mdr, CInode *rmdiri);
  bool _dir_is_nonempty(MDRequestRef& mdr, CInode *rmdiri);
  void _unlink_local(MDRequestRef& mdr, CDentry *dn, CDentry *straydn);
  void _unlink_local_finish(MDRequestRef& mdr,
			    CDentry *dn, CDentry *straydn,
			    version_t);
  bool _rmdir_prepare_witness(MDRequestRef& mdr, mds_rank_t who, std::vector<CDentry*>& trace, CDentry *straydn);
  void handle_peer_rmdir_prep(MDRequestRef& mdr);
  void _logged_peer_rmdir(MDRequestRef& mdr, CDentry *srcdn, CDentry *straydn);
  void _commit_peer_rmdir(MDRequestRef& mdr, int r, CDentry *straydn);
  void handle_peer_rmdir_prep_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &ack);
  void do_rmdir_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& mdr);
  void _rmdir_rollback_finish(MDRequestRef& mdr, metareqid_t reqid, CDentry *dn, CDentry *straydn);
  // rename
  void handle_client_rename(MDRequestRef& mdr);
  void _rename_finish(MDRequestRef& mdr,
		      CDentry *srcdn, CDentry *destdn, CDentry *straydn);
  // snapshot operations
  void handle_client_lssnap(MDRequestRef& mdr);
  void handle_client_mksnap(MDRequestRef& mdr);
  void _mksnap_finish(MDRequestRef& mdr, CInode *diri, SnapInfo &info);
  void handle_client_rmsnap(MDRequestRef& mdr);
  void _rmsnap_finish(MDRequestRef& mdr, CInode *diri, snapid_t snapid);
  void handle_client_renamesnap(MDRequestRef& mdr);
  void _renamesnap_finish(MDRequestRef& mdr, CInode *diri, snapid_t snapid);
  void handle_client_readdir_snapdiff(MDRequestRef& mdr);
  // helpers
  bool _rename_prepare_witness(MDRequestRef& mdr, mds_rank_t who, std::set<mds_rank_t> &witnesse,
			       std::vector<CDentry*>& srctrace, std::vector<CDentry*>& dsttrace, CDentry *straydn);
  version_t _rename_prepare_import(MDRequestRef& mdr, CDentry *srcdn, bufferlist *client_map_bl);
  bool _need_force_journal(CInode *diri, bool empty);
  void _rename_prepare(MDRequestRef& mdr,
		       EMetaBlob *metablob, bufferlist *client_map_bl,
		       CDentry *srcdn, CDentry *destdn, std::string_view alternate_name,
		       CDentry *straydn);
  /* set not_journaling=true if you're going to discard the results --
   * this bypasses the asserts to make sure we're journaling the right
   * things on the right nodes */
  void _rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, CDentry *straydn);
  // slaving
  void handle_peer_rename_prep(MDRequestRef& mdr);
  void handle_peer_rename_prep_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &m);
  void handle_peer_rename_notify_ack(MDRequestRef& mdr, const cref_t<MMDSPeerRequest> &m);
  void _peer_rename_sessions_flushed(MDRequestRef& mdr);
  void _logged_peer_rename(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, CDentry *straydn);
  void _commit_peer_rename(MDRequestRef& mdr, int r, CDentry *srcdn, CDentry *destdn, CDentry *straydn);
  void do_rename_rollback(bufferlist &rbl, mds_rank_t leader, MDRequestRef& mdr, bool finish_mdr=false);
  void _rename_rollback_finish(MutationRef& mut, MDRequestRef& mdr, CDentry *srcdn, version_t srcdnpv,
			       CDentry *destdn, CDentry *staydn, std::map<client_t,ref_t<MClientSnap>> splits[2],
			       bool finish_mdr);
  void evict_cap_revoke_non_responders();
  void handle_conf_change(const std::set<std::string>& changed);
  bool terminating_sessions = false;
  std::set<client_t> client_reclaim_gather;
  // NOTE: returns a copy of the set, not a reference
  std::set<client_t> get_laggy_clients() const {
    return laggy_clients;
  }
  void clear_laggy_clients() {
    laggy_clients.clear();
  }
  const bufferlist& get_snap_trace(Session *session, SnapRealm *realm) const;
  const bufferlist& get_snap_trace(client_t client, SnapRealm *realm) const;
private:
  friend class MDSContinuation;
  friend class ServerContext;
  friend class ServerLogContext;
  friend class Batch_Getattr_Lookup;
  // placeholder for validation handler to store xattr specific
  // data
  struct XattrInfo {
    virtual ~XattrInfo() {
    }
  };
  // xattr info parsed from a "ceph.mirror.info" value
  struct MirrorXattrInfo : XattrInfo {
    std::string cluster_id;
    std::string fs_id;
    static const std::string MIRROR_INFO_REGEX;
    static const std::string CLUSTER_ID;
    static const std::string FS_ID;
    MirrorXattrInfo(std::string_view cluster_id,
                    std::string_view fs_id)
      : cluster_id(cluster_id),
        fs_id(fs_id) {
    }
  };
  // a single pending setxattr/removexattr operation
  struct XattrOp {
    int op;                        // xattr operation code
    std::string xattr_name;
    const bufferlist &xattr_value; // NOTE: reference -- caller's buffer must outlive the op
    int flags = 0;
    std::unique_ptr<XattrInfo> xinfo;
    XattrOp(int op, std::string_view xattr_name, const bufferlist &xattr_value, int flags)
      : op(op),
        xattr_name(xattr_name),
        xattr_value(xattr_value),
        flags (flags) {
    }
  };
  // table entry mapping an xattr name to its validate/set/remove hooks
  struct XattrHandler {
    const std::string xattr_name;
    const std::string description;
    // basic checks are to be done in this handler. return -errno to
    // reject xattr request (set or remove), zero to proceed. handlers
    // may parse xattr value for verification if needed and have an
    // option to store custom data in XattrOp::xinfo.
    int (Server::*validate)(CInode *cur, const InodeStoreBase::xattr_map_const_ptr xattrs,
                            XattrOp *xattr_op);
    // set xattr for an inode in xattr_map
    void (Server::*setxattr)(CInode *cur, InodeStoreBase::xattr_map_ptr xattrs,
                             const XattrOp &xattr_op);
    // remove xattr for an inode from xattr_map
    void (Server::*removexattr)(CInode *cur, InodeStoreBase::xattr_map_ptr xattrs,
                                const XattrOp &xattr_op);
  };
  inline static const std::string DEFAULT_HANDLER = "<default>";
  static const XattrHandler xattr_handlers[];
  const XattrHandler* get_xattr_or_default_handler(std::string_view xattr_name);
  // generic variant to set/remove xattr in/from xattr_map
  int xattr_validate(CInode *cur, const InodeStoreBase::xattr_map_const_ptr xattrs,
                     const std::string &xattr_name, int op, int flags);
  void xattr_set(InodeStoreBase::xattr_map_ptr xattrs, const std::string &xattr_name,
                 const bufferlist &xattr_value);
  void xattr_rm(InodeStoreBase::xattr_map_ptr xattrs, const std::string &xattr_name);
  // default xattr handlers
  int default_xattr_validate(CInode *cur, const InodeStoreBase::xattr_map_const_ptr xattrs,
                             XattrOp *xattr_op);
  void default_setxattr_handler(CInode *cur, InodeStoreBase::xattr_map_ptr xattrs,
                                const XattrOp &xattr_op);
  void default_removexattr_handler(CInode *cur, InodeStoreBase::xattr_map_ptr xattrs,
                                   const XattrOp &xattr_op);
  // mirror info xattr handler
  int parse_mirror_info_xattr(const std::string &name, const std::string &value,
                              std::string &cluster_id, std::string &fs_id);
  int mirror_info_xattr_validate(CInode *cur, const InodeStoreBase::xattr_map_const_ptr xattrs,
                                 XattrOp *xattr_op);
  void mirror_info_setxattr_handler(CInode *cur, InodeStoreBase::xattr_map_ptr xattrs,
                                    const XattrOp &xattr_op);
  void mirror_info_removexattr_handler(CInode *cur, InodeStoreBase::xattr_map_ptr xattrs,
                                       const XattrOp &xattr_op);
static bool is_ceph_vxattr(std::string_view xattr_name) {
return xattr_name.rfind("ceph.dir.layout", 0) == 0 ||
xattr_name.rfind("ceph.file.layout", 0) == 0 ||
xattr_name.rfind("ceph.quota", 0) == 0 ||
xattr_name == "ceph.dir.subvolume" ||
xattr_name == "ceph.dir.pin" ||
xattr_name == "ceph.dir.pin.random" ||
xattr_name == "ceph.dir.pin.distributed";
}
static bool is_ceph_dir_vxattr(std::string_view xattr_name) {
return (xattr_name == "ceph.dir.layout" ||
xattr_name == "ceph.dir.layout.json" ||
xattr_name == "ceph.dir.layout.object_size" ||
xattr_name == "ceph.dir.layout.stripe_unit" ||
xattr_name == "ceph.dir.layout.stripe_count" ||
xattr_name == "ceph.dir.layout.pool" ||
xattr_name == "ceph.dir.layout.pool_name" ||
xattr_name == "ceph.dir.layout.pool_id" ||
xattr_name == "ceph.dir.layout.pool_namespace" ||
xattr_name == "ceph.dir.pin" ||
xattr_name == "ceph.dir.pin.random" ||
xattr_name == "ceph.dir.pin.distributed");
}
static bool is_ceph_file_vxattr(std::string_view xattr_name) {
return (xattr_name == "ceph.file.layout" ||
xattr_name == "ceph.file.layout.json" ||
xattr_name == "ceph.file.layout.object_size" ||
xattr_name == "ceph.file.layout.stripe_unit" ||
xattr_name == "ceph.file.layout.stripe_count" ||
xattr_name == "ceph.file.layout.pool" ||
xattr_name == "ceph.file.layout.pool_name" ||
xattr_name == "ceph.file.layout.pool_id" ||
xattr_name == "ceph.file.layout.pool_namespace");
}
static bool is_allowed_ceph_xattr(std::string_view xattr_name) {
// not a ceph xattr -- allow!
if (xattr_name.rfind("ceph.", 0) != 0) {
return true;
}
return xattr_name == "ceph.mirror.info" ||
xattr_name == "ceph.mirror.dirty_snap_id";
}
  void reply_client_request(MDRequestRef& mdr, const ref_t<MClientReply> &reply);
  void flush_session(Session *session, MDSGatherBuilder& gather);
  void _finalize_readdir(MDRequestRef& mdr,
			 CInode *diri,
			 CDir* dir,
			 bool start,
			 bool end,
			 __u16 flags,
			 __u32 numfiles,
			 bufferlist& dirbl,
			 bufferlist& dnbl);
  void _readdir_diff(
    utime_t now,
    MDRequestRef& mdr,
    CInode* diri,
    CDir* dir,
    SnapRealm* realm,
    unsigned max_entries,
    int bytes_left,
    const std::string& offset_str,
    uint32_t offset_hash,
    unsigned req_flags,
    bufferlist& dirbl);
  bool build_snap_diff(
    MDRequestRef& mdr,
    CDir* dir,
    int bytes_left,
    dentry_key_t* skip_key,
    snapid_t snapid_before,
    snapid_t snapid,
    const bufferlist& dnbl,
    std::function<bool(CDentry*, CInode*, bool)> add_result_cb);
  MDSRank *mds;
  MDCache *mdcache;
  MDLog *mdlog;
  PerfCounters *logger = nullptr;
  // OSDMap full status, used to generate CEPHFS_ENOSPC on some operations
  bool is_full = false;
  // State for while in reconnect
  MDSContext *reconnect_done = nullptr;
  int failed_reconnects = 0;
  bool reconnect_evicting = false;  // true if I am waiting for evictions to complete
                                    // before proceeding to reconnect_gather_finish
  time reconnect_start = clock::zero();
  time reconnect_last_seen = clock::zero();
  std::set<client_t> client_reconnect_gather;  // clients i need a reconnect msg from.
  std::set<client_t> client_reconnect_denied;  // clients whose reconnect msg have been denied .
  feature_bitset_t supported_features;
  feature_bitset_t supported_metric_spec;
  feature_bitset_t required_client_features;
  bool forward_all_requests_to_auth = false;
  bool replay_unsafe_with_closed_session = false;
  double cap_revoke_eviction_timeout = 0;
  uint64_t max_snaps_per_dir = 100;
  // long snapshot names have the following format: "_<SNAPSHOT-NAME>_<INODE-NUMBER>"
  uint64_t snapshot_name_max = NAME_MAX - 1 - 1 - 13;
  unsigned delegate_inos_pct = 0;
  uint64_t dir_max_entries = 0;
  int64_t bal_fragment_size_max = 0;
  double inject_rename_corrupt_dentry_first = 0.0;
  DecayCounter recall_throttle;
  time last_recall_state;
  MetricsHandler *metrics_handler;
  // Cache cap acquisition throttle configs
  uint64_t max_caps_per_client;
  uint64_t cap_acquisition_throttle;
  double max_caps_throttle_ratio;
  double caps_throttle_retry_request_timeout;
  size_t alternate_name_max = g_conf().get_val<Option::size_t>("mds_alternate_name_max");
  size_t fscrypt_last_block_max_size = g_conf().get_val<Option::size_t>("mds_fscrypt_last_block_max_size");
  // record laggy clients due to laggy OSDs
  std::set<client_t> laggy_clients;
};
// Bitwise-OR for the RecallFlags flag set; constexpr so flag combinations
// can be formed at compile time. Uses the C++14 _t alias (file already
// relies on C++17 features such as inline static members).
static inline constexpr auto operator|(Server::RecallFlags a, Server::RecallFlags b) {
  using T = std::underlying_type_t<Server::RecallFlags>;
  return static_cast<Server::RecallFlags>(static_cast<T>(a) | static_cast<T>(b));
}
// Bitwise-AND for the RecallFlags flag set; constexpr for compile-time use.
static inline constexpr auto operator&(Server::RecallFlags a, Server::RecallFlags b) {
  using T = std::underlying_type_t<Server::RecallFlags>;
  return static_cast<Server::RecallFlags>(static_cast<T>(a) & static_cast<T>(b));
}
// Print RecallFlags as a hex bitmask; restores the stream to decimal base.
static inline std::ostream& operator<<(std::ostream& os, const Server::RecallFlags& f) {
  using T = std::underlying_type_t<Server::RecallFlags>;
  return os << "0x" << std::hex << static_cast<T>(f) << std::dec;
}
// True when no recall flag is set.
static inline constexpr bool operator!(const Server::RecallFlags& f) {
  using T = std::underlying_type_t<Server::RecallFlags>;
  return static_cast<T>(f) == static_cast<T>(0);
}
#endif
| 23,647 | 38.945946 | 132 | h |
null | ceph-main/src/mds/SessionMap.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_SESSIONMAP_H
#define CEPH_MDS_SESSIONMAP_H
#include <set>
#include "include/unordered_map.h"
#include "include/Context.h"
#include "include/xlist.h"
#include "include/elist.h"
#include "include/interval_set.h"
#include "mdstypes.h"
#include "mds/MDSAuthCaps.h"
#include "common/perf_counters.h"
#include "common/DecayCounter.h"
#include "CInode.h"
#include "Capability.h"
#include "MDSContext.h"
#include "msg/Message.h"
struct MDRequestImpl;
// Perfcounter indices for the SessionMap subsystem (l_mdssm_* range).
enum {
  l_mdssm_first = 5500,
  l_mdssm_session_count,   // current number of sessions
  l_mdssm_session_add,     // sessions added
  l_mdssm_session_remove,  // sessions removed
  l_mdssm_session_open,
  l_mdssm_session_stale,
  l_mdssm_total_load,
  l_mdssm_avg_load,
  l_mdssm_avg_session_uptime,
  l_mdssm_last,
};
class CInode;
/*
* session
*/
/**
 * A single client session on this MDS rank: durable info (session_info_t),
 * capability/lease lists, inode preallocation intervals, completed-request
 * replay state, and several decay counters used for recall/throttling.
 */
class Session : public RefCountedObject {
  // -- state etc --
public:
  /*
                    <deleted> <-- closed <------------+
                         ^         |                  |
                         |         v                  |
                      killing <-- opening <----+      |
                         ^         |           |      |
                         |         v           |      |
                       stale <--> open --> closing ---+
     + additional dimension of 'importing' (with counter)
  */
  using clock = ceph::coarse_mono_clock;
  using time = ceph::coarse_mono_time;
  enum {
    STATE_CLOSED = 0,
    STATE_OPENING = 1,   // journaling open
    STATE_OPEN = 2,
    STATE_CLOSING = 3,   // journaling close
    STATE_STALE = 4,
    STATE_KILLING = 5
  };
  Session() = delete;
  Session(ConnectionRef con) :
    item_session_list(this),
    requests(member_offset(MDRequestImpl, item_session_request)),
    recall_caps(g_conf().get_val<double>("mds_recall_warning_decay_rate")),
    release_caps(g_conf().get_val<double>("mds_recall_warning_decay_rate")),
    recall_caps_throttle(g_conf().get_val<double>("mds_recall_max_decay_rate")),
    recall_caps_throttle2o(0.5),
    session_cache_liveness(g_conf().get_val<double>("mds_session_cache_liveness_decay_rate")),
    cap_acquisition(g_conf().get_val<double>("mds_session_cap_acquisition_decay_rate")),
    birth_time(clock::now())
  {
    set_connection(std::move(con));
  }
  ~Session() override {
    ceph_assert(!item_session_list.is_on_list());
    preopen_out_queue.clear();
  }
  static std::string_view get_state_name(int s) {
    switch (s) {
    case STATE_CLOSED: return "closed";
    case STATE_OPENING: return "opening";
    case STATE_OPEN: return "open";
    case STATE_CLOSING: return "closing";
    case STATE_STALE: return "stale";
    case STATE_KILLING: return "killing";
    default: return "???";
    }
  }
  void dump(ceph::Formatter *f, bool cap_dump=false) const;
  // Record a projected sessionmap version for this session; must be matched
  // by a later pop_pv() in the same order.
  void push_pv(version_t pv)
  {
    ceph_assert(projected.empty() || projected.back() != pv);
    projected.push_back(pv);
  }
  void pop_pv(version_t v)
  {
    ceph_assert(!projected.empty());
    ceph_assert(projected.front() == v);
    projected.pop_front();
  }
  int get_state() const { return state; }
  // Bumps state_seq on every actual state transition.
  void set_state(int new_state)
  {
    if (state != new_state) {
      state = new_state;
      state_seq++;
    }
  }
  void set_reconnecting(bool s) { reconnecting = s; }
  void decode(ceph::buffer::list::const_iterator &p);
  // Replace client metadata and refresh the cached human-readable name.
  template<typename T>
  void set_client_metadata(T&& meta)
  {
    info.client_metadata = std::forward<T>(meta);
    _update_human_name();
  }
  const std::string& get_human_name() const {return human_name;}
  size_t get_request_count() const;
  void notify_cap_release(size_t n_caps);
  uint64_t notify_recall_sent(size_t new_limit);
  auto get_recall_caps_throttle() const {
    return recall_caps_throttle.get();
  }
  auto get_recall_caps_throttle2o() const {
    return recall_caps_throttle2o.get();
  }
  auto get_recall_caps() const {
    return recall_caps.get();
  }
  auto get_release_caps() const {
    return release_caps.get();
  }
  auto get_session_cache_liveness() const {
    return session_cache_liveness.get();
  }
  auto get_cap_acquisition() const {
    return cap_acquisition.get();
  }
  // Consume a preallocated inode number. With an explicit ino, it must be
  // in prealloc_inos and is taken from either the delegated or free set
  // (asserts otherwise); with ino == 0 the lowest free prealloc ino is
  // taken. Returns 0 if nothing is available.
  inodeno_t take_ino(inodeno_t ino = 0) {
    if (ino) {
      if (!info.prealloc_inos.contains(ino))
	return 0;
      if (delegated_inos.contains(ino)) {
	delegated_inos.erase(ino);
      } else if (free_prealloc_inos.contains(ino)) {
	free_prealloc_inos.erase(ino);
      } else {
	ceph_assert(0);
      }
    } else if (!free_prealloc_inos.empty()) {
      ino = free_prealloc_inos.range_start();
      free_prealloc_inos.erase(ino);
    }
    return ino;
  }
  // Move up to `want` inode numbers (counting ones already delegated) from
  // the free prealloc set into the delegated set, reporting them in `inos`.
  void delegate_inos(int want, interval_set<inodeno_t>& inos) {
    want -= (int)delegated_inos.size();
    if (want <= 0)
      return;
    for (auto it = free_prealloc_inos.begin(); it != free_prealloc_inos.end(); ) {
      if (want < (int)it.get_len()) {
	inos.insert(it.get_start(), (inodeno_t)want);
	delegated_inos.insert(it.get_start(), (inodeno_t)want);
	free_prealloc_inos.erase(it.get_start(), (inodeno_t)want);
	break;
      }
      want -= (int)it.get_len();
      inos.insert(it.get_start(), it.get_len());
      delegated_inos.insert(it.get_start(), it.get_len());
      free_prealloc_inos.erase(it++);
      if (want <= 0)
	break;
    }
  }
  // sans any delegated ones
  int get_num_prealloc_inos() const {
    return free_prealloc_inos.size();
  }
  int get_num_projected_prealloc_inos() const {
    return get_num_prealloc_inos() + pending_prealloc_inos.size();
  }
  client_t get_client() const {
    return info.get_client();
  }
  std::string_view get_state_name() const { return get_state_name(state); }
  uint64_t get_state_seq() const { return state_seq; }
  bool is_closed() const { return state == STATE_CLOSED; }
  bool is_opening() const { return state == STATE_OPENING; }
  bool is_open() const { return state == STATE_OPEN; }
  bool is_closing() const { return state == STATE_CLOSING; }
  bool is_stale() const { return state == STATE_STALE; }
  bool is_killing() const { return state == STATE_KILLING; }
  void inc_importing() {
    ++importing_count;
  }
  void dec_importing() {
    ceph_assert(importing_count > 0);
    --importing_count;
  }
  bool is_importing() const { return importing_count > 0; }
  // Resets the load counter with the new decay rate (previous value lost).
  void set_load_avg_decay_rate(double rate) {
    ceph_assert(is_open() || is_stale());
    load_avg = DecayCounter(rate);
  }
  uint64_t get_load_avg() const {
    return (uint64_t)load_avg.get();
  }
  void hit_session() {
    load_avg.adjust();
  }
  double get_session_uptime() const {
    std::chrono::duration<double> uptime = clock::now() - birth_time;
    return uptime.count();
  }
  time get_birth_time() const {
    return birth_time;
  }
  void inc_cap_gen() { ++cap_gen; }
  uint32_t get_cap_gen() const { return cap_gen; }
  version_t inc_push_seq() { return ++cap_push_seq; }
  version_t get_push_seq() const { return cap_push_seq; }
  // Register a waiter for the current push seq; fired by finish_flush().
  version_t wait_for_flush(MDSContext* c) {
    waitfor_flush[get_push_seq()].push_back(c);
    return get_push_seq();
  }
  // Collect (into `ls`) all waiters registered at or below `seq`.
  void finish_flush(version_t seq, MDSContext::vec& ls) {
    while (!waitfor_flush.empty()) {
      auto it = waitfor_flush.begin();
      if (it->first > seq)
	break;
      auto& v = it->second;
      ls.insert(ls.end(), v.begin(), v.end());
      waitfor_flush.erase(it);
    }
  }
  void touch_readdir_cap(uint32_t count) {
    cap_acquisition.hit(count);
  }
  // Mark cap recently used: bump liveness and move to front of LRU list.
  void touch_cap(Capability *cap) {
    session_cache_liveness.hit(1.0);
    caps.push_front(&cap->item_session_caps);
  }
  // As touch_cap(), but demote to the back (least-recently-used end).
  void touch_cap_bottom(Capability *cap) {
    session_cache_liveness.hit(1.0);
    caps.push_back(&cap->item_session_caps);
  }
  void touch_lease(ClientLease *r) {
    session_cache_liveness.hit(1.0);
    leases.push_back(&r->item_session_lease);
  }
  bool is_any_flush_waiter() {
    return !waitfor_flush.empty();
  }
  void add_completed_request(ceph_tid_t t, inodeno_t created) {
    info.completed_requests[t] = created;
    completed_requests_dirty = true;
  }
  // Drop completed requests below mintid (mintid == 0 trims nothing).
  // Returns true if anything was erased.
  bool trim_completed_requests(ceph_tid_t mintid) {
    // trim
    bool erased_any = false;
    last_trim_completed_requests_tid = mintid;
    while (!info.completed_requests.empty() &&
	   (mintid == 0 || info.completed_requests.begin()->first < mintid)) {
      info.completed_requests.erase(info.completed_requests.begin());
      erased_any = true;
    }
    if (erased_any) {
      completed_requests_dirty = true;
    }
    return erased_any;
  }
  bool have_completed_request(ceph_tid_t tid, inodeno_t *pcreated) const {
    auto p = info.completed_requests.find(tid);
    if (p == info.completed_requests.end())
      return false;
    if (pcreated)
      *pcreated = p->second;
    return true;
  }
  void add_completed_flush(ceph_tid_t tid) {
    info.completed_flushes.insert(tid);
  }
  bool trim_completed_flushes(ceph_tid_t mintid) {
    bool erased_any = false;
    last_trim_completed_flushes_tid = mintid;
    while (!info.completed_flushes.empty() &&
	   (mintid == 0 || *info.completed_flushes.begin() < mintid)) {
      info.completed_flushes.erase(info.completed_flushes.begin());
      erased_any = true;
    }
    if (erased_any) {
      completed_requests_dirty = true;
    }
    return erased_any;
  }
  bool have_completed_flush(ceph_tid_t tid) const {
    return info.completed_flushes.count(tid);
  }
  uint64_t get_num_caps() const {
    return caps.size();
  }
  unsigned get_num_completed_flushes() const { return info.completed_flushes.size(); }
  unsigned get_num_trim_flushes_warnings() const {
    return num_trim_flushes_warnings;
  }
  void inc_num_trim_flushes_warnings() { ++num_trim_flushes_warnings; }
  void reset_num_trim_flushes_warnings() { num_trim_flushes_warnings = 0; }
  unsigned get_num_completed_requests() const { return info.completed_requests.size(); }
  unsigned get_num_trim_requests_warnings() const {
    return num_trim_requests_warnings;
  }
  void inc_num_trim_requests_warnings() { ++num_trim_requests_warnings; }
  void reset_num_trim_requests_warnings() { num_trim_requests_warnings = 0; }
  bool has_dirty_completed_requests() const
  {
    return completed_requests_dirty;
  }
  void clear_dirty_completed_requests()
  {
    completed_requests_dirty = false;
  }
  int check_access(CInode *in, unsigned mask, int caller_uid, int caller_gid,
		   const std::vector<uint64_t> *gid_list, int new_uid, int new_gid);
  bool fs_name_capable(std::string_view fs_name, unsigned mask) const {
    return auth_caps.fs_name_capable(fs_name, mask);
  }
  // Attach a connection and refresh auth name / address / entity name
  // from the peer.
  void set_connection(ConnectionRef con) {
    connection = std::move(con);
    auto& c = connection;
    if (c) {
      info.auth_name = c->get_peer_entity_name();
      info.inst.addr = c->get_peer_socket_addr();
      info.inst.name = entity_name_t(c->get_peer_type(), c->get_peer_global_id());
    }
  }
  const ConnectionRef& get_connection() const {
    return connection;
  }
  // Reset prealloc intervals, durable metadata, push seq and renew time.
  void clear() {
    pending_prealloc_inos.clear();
    free_prealloc_inos.clear();
    delegated_inos.clear();
    info.clear_meta();
    cap_push_seq = 0;
    last_cap_renew = clock::zero();
  }
  Session *reclaiming_from = nullptr;
  session_info_t info;                         ///< durable bits
  MDSAuthCaps auth_caps;
  xlist<Session*>::item item_session_list;
  std::list<ceph::ref_t<Message>> preopen_out_queue;  ///< messages for client, queued before they connect
  /* This is mutable to allow get_request_count to be const. elist does not
   * support const iterators yet.
   */
  mutable elist<MDRequestImpl*> requests;
  interval_set<inodeno_t> pending_prealloc_inos; // journaling prealloc, will be added to prealloc_inos
  interval_set<inodeno_t> free_prealloc_inos; //
  interval_set<inodeno_t> delegated_inos; // hand these out to client
  xlist<Capability*> caps;     // inodes with caps; front=most recently used
  xlist<ClientLease*> leases;  // metadata leases to clients
  time last_cap_renew = clock::zero();
  time last_seen = clock::zero();
  // -- leases --
  uint32_t lease_seq = 0;
protected:
  ConnectionRef connection;
private:
  friend class SessionMap;
  // Human (friendly) name is soft state generated from client metadata
  void _update_human_name();
  int state = STATE_CLOSED;
  bool reconnecting = false;
  uint64_t state_seq = 0;
  int importing_count = 0;
  std::string human_name;
  // Versions in this session was projected: used to verify
  // that appropriate mark_dirty calls follow.
  std::deque<version_t> projected;
  // request load average for this session
  DecayCounter load_avg;
  // Ephemeral state for tracking progress of capability recalls
  // caps being recalled recently by this session; used for Beacon warnings
  DecayCounter recall_caps;   // caps that have been released
  DecayCounter release_caps;
  // throttle on caps recalled
  DecayCounter recall_caps_throttle;
  // second order throttle that prevents recalling too quickly
  DecayCounter recall_caps_throttle2o;
  // New limit in SESSION_RECALL
  uint32_t recall_limit = 0;
  // session caps liveness
  DecayCounter session_cache_liveness;
  // cap acquisition via readdir
  DecayCounter cap_acquisition;
  // session start time -- used to track average session time
  // note that this is initialized in the constructor rather
  // than at the time of adding a session to the sessionmap
  // as journal replay of sessionmap will not call add_session().
  time birth_time;
  // -- caps --
  uint32_t cap_gen = 0;
  version_t cap_push_seq = 0;  // cap push seq #
  std::map<version_t, MDSContext::vec > waitfor_flush;  // flush session messages
  // Has completed_requests been modified since the last time we
  // wrote this session out?
  bool completed_requests_dirty = false;
  unsigned num_trim_flushes_warnings = 0;
  unsigned num_trim_requests_warnings = 0;
  ceph_tid_t last_trim_completed_requests_tid = 0;
  ceph_tid_t last_trim_completed_flushes_tid = 0;
};
/**
 * Matching criteria for selecting sessions (presumably driven by admin
 * commands -- verify against callers of match()/parse()). Empty string
 * fields and id == 0 appear to act as wildcards; see match() impl.
 */
class SessionFilter
{
public:
  SessionFilter() : reconnecting(false, false) {}
  bool match(
      const Session &session,
      std::function<bool(client_t)> is_reconnecting) const;
  // Parse filter criteria from command arguments; on error, writes a
  // message to *ss and returns non-zero (see impl).
  int parse(const std::vector<std::string> &args, std::ostream *ss);
  // Enable filtering on the reconnecting flag with value v.
  void set_reconnecting(bool v)
  {
    reconnecting.first = true;
    reconnecting.second = v;
  }
  std::map<std::string, std::string> metadata;
  std::string auth_name;
  std::string state;
  int64_t id = 0;
protected:
  // First is whether to filter, second is filter value
  std::pair<bool, bool> reconnecting;
};
/*
* session map
*/
class MDSRank;
/**
* Encapsulate the serialized state associated with SessionMap. Allows
* encode/decode outside of live MDS instance.
*/
/**
 * Serialized state associated with SessionMap: the entity_name -> Session
 * table plus its on-disk version. Allows encode/decode outside of a live
 * MDS instance (e.g. by offline tools).
 */
class SessionMapStore {
public:
  using clock = Session::clock;
  using time = Session::time;
  SessionMapStore(): total_load_avg(decay_rate) {}
  virtual ~SessionMapStore() {};
  version_t get_version() const {return version;}
  virtual void encode_header(ceph::buffer::list *header_bl);
  virtual void decode_header(ceph::buffer::list &header_bl);
  virtual void decode_values(std::map<std::string, ceph::buffer::list> &session_vals);
  virtual void decode_legacy(ceph::buffer::list::const_iterator& blp);
  void dump(ceph::Formatter *f) const;
  void set_rank(mds_rank_t r)
  {
    rank = r;
  }
  // Look up the session for `i`, creating (and perfcounting) a fresh one
  // with no connection if it does not exist yet.
  Session* get_or_add_session(const entity_inst_t& i) {
    Session *s;
    auto session_map_entry = session_map.find(i.name);
    if (session_map_entry != session_map.end()) {
      s = session_map_entry->second;
    } else {
      s = session_map[i.name] = new Session(ConnectionRef());
      s->info.inst = i;
      s->last_cap_renew = Session::clock::now();
      if (logger) {
        logger->set(l_mdssm_session_count, session_map.size());
        logger->inc(l_mdssm_session_add);
      }
    }
    return s;
  }
  static void generate_test_instances(std::list<SessionMapStore*>& ls);
  // NOTE: clears the table only; does not delete the Session objects.
  void reset_state()
  {
    session_map.clear();
  }
  mds_rank_t rank = MDS_RANK_NONE;
protected:
  version_t version = 0;
  ceph::unordered_map<entity_name_t, Session*> session_map;
  PerfCounters *logger =nullptr;
  // total request load avg
  double decay_rate = g_conf().get_val<double>("mds_request_load_average_decay_rate");
  DecayCounter total_load_avg;
};
class SessionMap : public SessionMapStore {
public:
SessionMap() = delete;
explicit SessionMap(MDSRank *m) : mds(m) {}
~SessionMap() override
{
for (auto p : by_state)
delete p.second;
if (logger) {
g_ceph_context->get_perfcounters_collection()->remove(logger);
}
delete logger;
}
uint64_t set_state(Session *session, int state);
void update_average_session_age();
void register_perfcounters();
void set_version(const version_t v)
{
version = projected = v;
}
void set_projected(const version_t v)
{
projected = v;
}
version_t get_projected() const
{
return projected;
}
version_t get_committed() const
{
return committed;
}
version_t get_committing() const
{
return committing;
}
// sessions
void decode_legacy(ceph::buffer::list::const_iterator& blp) override;
bool empty() const { return session_map.empty(); }
const auto& get_sessions() const {
return session_map;
}
bool is_any_state(int state) const {
auto it = by_state.find(state);
if (it == by_state.end() || it->second->empty())
return false;
return true;
}
bool have_unclosed_sessions() const {
return
is_any_state(Session::STATE_OPENING) ||
is_any_state(Session::STATE_OPEN) ||
is_any_state(Session::STATE_CLOSING) ||
is_any_state(Session::STATE_STALE) ||
is_any_state(Session::STATE_KILLING);
}
bool have_session(entity_name_t w) const {
return session_map.count(w);
}
Session* get_session(entity_name_t w) {
auto session_map_entry = session_map.find(w);
return (session_map_entry != session_map.end() ?
session_map_entry-> second : nullptr);
}
const Session* get_session(entity_name_t w) const {
ceph::unordered_map<entity_name_t, Session*>::const_iterator p = session_map.find(w);
if (p == session_map.end()) {
return NULL;
} else {
return p->second;
}
}
void add_session(Session *s);
void remove_session(Session *s);
void touch_session(Session *session);
Session *get_oldest_session(int state) {
auto by_state_entry = by_state.find(state);
if (by_state_entry == by_state.end() || by_state_entry->second->empty())
return 0;
return by_state_entry->second->front();
}
void dump();
template<typename F>
void get_client_sessions(F&& f) const {
for (const auto& p : session_map) {
auto& session = p.second;
if (session->info.inst.name.is_client())
f(session);
}
}
template<typename C>
void get_client_session_set(C& c) const {
auto f = [&c](auto& s) {
c.insert(s);
};
get_client_sessions(f);
}
// helpers
entity_inst_t& get_inst(entity_name_t w) {
ceph_assert(session_map.count(w));
return session_map[w]->info.inst;
}
version_t get_push_seq(client_t client) {
return get_session(entity_name_t::CLIENT(client.v))->get_push_seq();
}
bool have_completed_request(metareqid_t rid) {
Session *session = get_session(rid.name);
return session && session->have_completed_request(rid.tid, NULL);
}
void trim_completed_requests(entity_name_t c, ceph_tid_t tid) {
Session *session = get_session(c);
ceph_assert(session);
session->trim_completed_requests(tid);
}
void wipe();
void wipe_ino_prealloc();
object_t get_object_name() const;
void load(MDSContext *onload);
void _load_finish(
int operation_r,
int header_r,
int values_r,
bool first,
ceph::buffer::list &header_bl,
std::map<std::string, ceph::buffer::list> &session_vals,
bool more_session_vals);
void load_legacy();
void _load_legacy_finish(int r, ceph::buffer::list &bl);
void save(MDSContext *onsave, version_t needv=0);
void _save_finish(version_t v);
/**
* Advance the version, and mark this session
* as dirty within the new version.
*
* Dirty means journalled but needing writeback
* to the backing store. Must have called
* mark_projected previously for this session.
*/
void mark_dirty(Session *session, bool may_save=true);
/**
* Advance the projected version, and mark this
* session as projected within the new version
*
* Projected means the session is updated in memory
* but we're waiting for the journal write of the update
* to finish. Must subsequently call mark_dirty
* for sessions in the same global order as calls
* to mark_projected.
*/
version_t mark_projected(Session *session);
/**
* During replay, advance versions to account
* for a session modification, and mark the
* session dirty.
*/
void replay_dirty_session(Session *session);
/**
* During replay, if a session no longer present
* would have consumed a version, advance `version`
* and `projected` to account for that.
*/
void replay_advance_version();
/**
* During replay, open sessions, advance versions and
* mark these sessions as dirty.
*/
void replay_open_sessions(version_t event_cmapv,
std::map<client_t,entity_inst_t>& client_map,
std::map<client_t,client_metadata_t>& client_metadata_map);
/**
* For these session IDs, if a session exists with this ID, and it has
* dirty completed_requests, then persist it immediately
* (ahead of usual project/dirty versioned writes
* of the map).
*/
void save_if_dirty(const std::set<entity_name_t> &tgt_sessions,
MDSGatherBuilder *gather_bld);
void hit_session(Session *session);
void handle_conf_change(const std::set <std::string> &changed);
MDSRank *mds;
std::map<int,xlist<Session*>*> by_state;
std::map<version_t, MDSContext::vec> commit_waiters;
// -- loading, saving --
inodeno_t ino;
MDSContext::vec waiting_for_load;
protected:
void _mark_dirty(Session *session, bool may_save);
version_t projected = 0, committing = 0, committed = 0;
std::set<entity_name_t> dirty_sessions;
std::set<entity_name_t> null_sessions;
bool loaded_legacy = false;
private:
uint64_t get_session_count_in_state(int state) {
return !is_any_state(state) ? 0 : by_state[state]->size();
}
void update_average_birth_time(const Session &s, bool added=true) {
uint32_t sessions = session_map.size();
time birth_time = s.get_birth_time();
if (sessions == 1) {
avg_birth_time = added ? birth_time : clock::zero();
return;
}
if (added) {
avg_birth_time = clock::time_point(
((avg_birth_time - clock::zero()) / sessions) * (sessions - 1) +
(birth_time - clock::zero()) / sessions);
} else {
avg_birth_time = clock::time_point(
((avg_birth_time - clock::zero()) / (sessions - 1)) * sessions -
(birth_time - clock::zero()) / (sessions - 1));
}
}
time avg_birth_time = clock::zero();
};
std::ostream& operator<<(std::ostream &out, const Session &s);
#endif
| 23,450 | 26.589412 | 106 | h |
null | ceph-main/src/mds/SimpleLock.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_SIMPLELOCK_H
#define CEPH_SIMPLELOCK_H
#include <boost/intrusive_ptr.hpp>
#include "MDSCacheObject.h"
#include "MDSContext.h"
// -- lock types --
// see CEPH_LOCK_*
extern "C" {
#include "locks.h"
}
#define CAP_ANY 0
#define CAP_LONER 1
#define CAP_XLOCKER 2
struct MDLockCache;
struct MDLockCacheItem;
struct MutationImpl;
typedef boost::intrusive_ptr<MutationImpl> MutationRef;
// Binds a CEPH_LOCK_* type code to the state-machine table (from locks.h)
// that governs locks of that type.
struct LockType {
  explicit LockType(int t) : type(t) {
    // Select the state machine for this lock type; unknown types get none.
    if (type == CEPH_LOCK_DN ||
	type == CEPH_LOCK_IAUTH ||
	type == CEPH_LOCK_ILINK ||
	type == CEPH_LOCK_IXATTR ||
	type == CEPH_LOCK_ISNAP ||
	type == CEPH_LOCK_IFLOCK ||
	type == CEPH_LOCK_IPOLICY) {
      sm = &sm_simplelock;
    } else if (type == CEPH_LOCK_IDFT ||
	       type == CEPH_LOCK_INEST) {
      sm = &sm_scatterlock;
    } else if (type == CEPH_LOCK_IFILE) {
      sm = &sm_filelock;
    } else if (type == CEPH_LOCK_DVERSION ||
	       type == CEPH_LOCK_IVERSION) {
      sm = &sm_locallock;
    } else {
      sm = 0;
    }
  }
  int type;
  const sm_t *sm;
};
class SimpleLock {
public:
// waiting
static const uint64_t WAIT_RD = (1<<0); // to read
static const uint64_t WAIT_WR = (1<<1); // to write
static const uint64_t WAIT_XLOCK = (1<<2); // to xlock (** dup)
static const uint64_t WAIT_STABLE = (1<<2); // for a stable state
static const uint64_t WAIT_REMOTEXLOCK = (1<<3); // for a remote xlock
static const int WAIT_BITS = 4;
static const uint64_t WAIT_ALL = ((1<<WAIT_BITS)-1);
static std::string_view get_state_name(int n) {
switch (n) {
case LOCK_UNDEF: return "UNDEF";
case LOCK_SYNC: return "sync";
case LOCK_LOCK: return "lock";
case LOCK_PREXLOCK: return "prexlock";
case LOCK_XLOCK: return "xlock";
case LOCK_XLOCKDONE: return "xlockdone";
case LOCK_XLOCKSNAP: return "xlocksnap";
case LOCK_LOCK_XLOCK: return "lock->xlock";
case LOCK_SYNC_LOCK: return "sync->lock";
case LOCK_LOCK_SYNC: return "lock->sync";
case LOCK_REMOTEXLOCK: return "remote_xlock";
case LOCK_EXCL: return "excl";
case LOCK_EXCL_SYNC: return "excl->sync";
case LOCK_EXCL_LOCK: return "excl->lock";
case LOCK_SYNC_EXCL: return "sync->excl";
case LOCK_LOCK_EXCL: return "lock->excl";
case LOCK_XSYN: return "xsyn";
case LOCK_XSYN_EXCL: return "xsyn->excl";
case LOCK_EXCL_XSYN: return "excl->xsyn";
case LOCK_XSYN_SYNC: return "xsyn->sync";
case LOCK_XSYN_LOCK: return "xsyn->lock";
case LOCK_XSYN_MIX: return "xsyn->mix";
case LOCK_SYNC_MIX: return "sync->mix";
case LOCK_SYNC_MIX2: return "sync->mix(2)";
case LOCK_LOCK_TSYN: return "lock->tsyn";
case LOCK_MIX_LOCK: return "mix->lock";
case LOCK_MIX_LOCK2: return "mix->lock(2)";
case LOCK_MIX: return "mix";
case LOCK_MIX_TSYN: return "mix->tsyn";
case LOCK_TSYN_MIX: return "tsyn->mix";
case LOCK_TSYN_LOCK: return "tsyn->lock";
case LOCK_TSYN: return "tsyn";
case LOCK_MIX_SYNC: return "mix->sync";
case LOCK_MIX_SYNC2: return "mix->sync(2)";
case LOCK_EXCL_MIX: return "excl->mix";
case LOCK_MIX_EXCL: return "mix->excl";
case LOCK_PRE_SCAN: return "*->scan";
case LOCK_SCAN: return "scan";
case LOCK_SNAP_SYNC: return "snap->sync";
default: ceph_abort(); return std::string_view();
}
}
static std::string_view get_lock_type_name(int t) {
switch (t) {
case CEPH_LOCK_DN: return "dn";
case CEPH_LOCK_DVERSION: return "dversion";
case CEPH_LOCK_IVERSION: return "iversion";
case CEPH_LOCK_IFILE: return "ifile";
case CEPH_LOCK_IAUTH: return "iauth";
case CEPH_LOCK_ILINK: return "ilink";
case CEPH_LOCK_IDFT: return "idft";
case CEPH_LOCK_INEST: return "inest";
case CEPH_LOCK_IXATTR: return "ixattr";
case CEPH_LOCK_ISNAP: return "isnap";
case CEPH_LOCK_IFLOCK: return "iflock";
case CEPH_LOCK_IPOLICY: return "ipolicy";
default: return "unknown";
}
}
static std::string_view get_lock_action_name(int a) {
switch (a) {
case LOCK_AC_SYNC: return "sync";
case LOCK_AC_MIX: return "mix";
case LOCK_AC_LOCK: return "lock";
case LOCK_AC_LOCKFLUSHED: return "lockflushed";
case LOCK_AC_SYNCACK: return "syncack";
case LOCK_AC_MIXACK: return "mixack";
case LOCK_AC_LOCKACK: return "lockack";
case LOCK_AC_REQSCATTER: return "reqscatter";
case LOCK_AC_REQUNSCATTER: return "requnscatter";
case LOCK_AC_NUDGE: return "nudge";
case LOCK_AC_REQRDLOCK: return "reqrdlock";
default: return "???";
}
}
SimpleLock(MDSCacheObject *o, LockType *lt) :
type(lt),
parent(o)
{}
virtual ~SimpleLock() {}
client_t get_excl_client() const {
return have_more() ? more()->excl_client : -1;
}
void set_excl_client(client_t c) {
if (c < 0 && !have_more())
return; // default is -1
more()->excl_client = c;
}
virtual bool is_scatterlock() const {
return false;
}
virtual bool is_locallock() const {
return false;
}
// parent
MDSCacheObject *get_parent() { return parent; }
int get_type() const { return type->type; }
const sm_t* get_sm() const { return type->sm; }
int get_wait_shift() const;
int get_cap_shift() const;
int get_cap_mask() const;
void decode_locked_state(const ceph::buffer::list& bl) {
parent->decode_lock_state(type->type, bl);
}
void encode_locked_state(ceph::buffer::list& bl) {
parent->encode_lock_state(type->type, bl);
}
void finish_waiters(uint64_t mask, int r=0) {
parent->finish_waiting(mask << get_wait_shift(), r);
}
void take_waiting(uint64_t mask, MDSContext::vec& ls) {
parent->take_waiting(mask << get_wait_shift(), ls);
}
void add_waiter(uint64_t mask, MDSContext *c) {
parent->add_waiter((mask << get_wait_shift()) | MDSCacheObject::WAIT_ORDERED, c);
}
bool is_waiter_for(uint64_t mask) const {
return parent->is_waiter_for(mask << get_wait_shift());
}
bool is_cached() const {
return state_flags & CACHED;
}
void add_cache(MDLockCacheItem& item);
void remove_cache(MDLockCacheItem& item);
std::vector<MDLockCache*> get_active_caches();
// state
int get_state() const { return state; }
int set_state(int s) {
state = s;
//assert(!is_stable() || gather_set.size() == 0); // gather should be empty in stable states.
return s;
}
void set_state_rejoin(int s, MDSContext::vec& waiters, bool survivor) {
ceph_assert(!get_parent()->is_auth());
// If lock in the replica object was not in SYNC state when auth mds of the object failed.
// Auth mds of the object may take xlock on the lock and change the object when replaying
// unsafe requests.
if (!survivor || state != LOCK_SYNC)
mark_need_recover();
state = s;
if (is_stable())
take_waiting(SimpleLock::WAIT_ALL, waiters);
}
bool is_stable() const {
return get_sm()->states[state].next == 0;
}
bool is_unstable_and_locked() const {
return (!is_stable() && is_locked());
}
bool is_locked() const {
return is_rdlocked() || is_wrlocked() || is_xlocked();
}
int get_next_state() {
return get_sm()->states[state].next;
}
bool is_sync_and_unlocked() const {
return
get_state() == LOCK_SYNC &&
!is_rdlocked() &&
!is_leased() &&
!is_wrlocked() &&
!is_xlocked();
}
/*
bool fw_rdlock_to_auth() {
return get_sm()->states[state].can_rdlock == FW;
}
*/
bool req_rdlock_from_auth() {
return get_sm()->states[state].can_rdlock == REQ;
}
// gather set
static std::set<int32_t> empty_gather_set;
// int32_t: <0 is client, >=0 is MDS rank
const std::set<int32_t>& get_gather_set() const {
return have_more() ? more()->gather_set : empty_gather_set;
}
void init_gather() {
for (const auto& p : parent->get_replicas()) {
more()->gather_set.insert(p.first);
}
}
bool is_gathering() const {
return have_more() && !more()->gather_set.empty();
}
bool is_gathering(int32_t i) const {
return have_more() && more()->gather_set.count(i);
}
void clear_gather() {
if (have_more())
more()->gather_set.clear();
}
void remove_gather(int32_t i) {
if (have_more())
more()->gather_set.erase(i);
}
virtual bool is_dirty() const { return false; }
virtual bool is_stale() const { return false; }
virtual bool is_flushing() const { return false; }
virtual bool is_flushed() const { return false; }
virtual void clear_flushed() { }
// can_*
bool can_lease(client_t client) const {
return get_sm()->states[state].can_lease == ANY ||
(get_sm()->states[state].can_lease == AUTH && parent->is_auth()) ||
(get_sm()->states[state].can_lease == XCL && client >= 0 && get_xlock_by_client() == client);
}
bool can_read(client_t client) const {
return get_sm()->states[state].can_read == ANY ||
(get_sm()->states[state].can_read == AUTH && parent->is_auth()) ||
(get_sm()->states[state].can_read == XCL && client >= 0 && get_xlock_by_client() == client);
}
bool can_read_projected(client_t client) const {
return get_sm()->states[state].can_read_projected == ANY ||
(get_sm()->states[state].can_read_projected == AUTH && parent->is_auth()) ||
(get_sm()->states[state].can_read_projected == XCL && client >= 0 && get_xlock_by_client() == client);
}
bool can_rdlock(client_t client) const {
return get_sm()->states[state].can_rdlock == ANY ||
(get_sm()->states[state].can_rdlock == AUTH && parent->is_auth()) ||
(get_sm()->states[state].can_rdlock == XCL && client >= 0 && get_xlock_by_client() == client);
}
bool can_wrlock(client_t client) const {
return get_sm()->states[state].can_wrlock == ANY ||
(get_sm()->states[state].can_wrlock == AUTH && parent->is_auth()) ||
(get_sm()->states[state].can_wrlock == XCL && client >= 0 && (get_xlock_by_client() == client ||
get_excl_client() == client));
}
bool can_force_wrlock(client_t client) const {
return get_sm()->states[state].can_force_wrlock == ANY ||
(get_sm()->states[state].can_force_wrlock == AUTH && parent->is_auth()) ||
(get_sm()->states[state].can_force_wrlock == XCL && client >= 0 && (get_xlock_by_client() == client ||
get_excl_client() == client));
}
bool can_xlock(client_t client) const {
return get_sm()->states[state].can_xlock == ANY ||
(get_sm()->states[state].can_xlock == AUTH && parent->is_auth()) ||
(get_sm()->states[state].can_xlock == XCL && client >= 0 && get_xlock_by_client() == client);
}
// rdlock
bool is_rdlocked() const { return num_rdlock > 0; }
int get_rdlock() {
if (!num_rdlock)
parent->get(MDSCacheObject::PIN_LOCK);
return ++num_rdlock;
}
int put_rdlock() {
ceph_assert(num_rdlock>0);
--num_rdlock;
if (num_rdlock == 0)
parent->put(MDSCacheObject::PIN_LOCK);
return num_rdlock;
}
int get_num_rdlocks() const {
return num_rdlock;
}
// wrlock
void get_wrlock(bool force=false) {
//assert(can_wrlock() || force);
if (more()->num_wrlock == 0)
parent->get(MDSCacheObject::PIN_LOCK);
++more()->num_wrlock;
}
void put_wrlock() {
--more()->num_wrlock;
if (more()->num_wrlock == 0) {
parent->put(MDSCacheObject::PIN_LOCK);
try_clear_more();
}
}
bool is_wrlocked() const {
return have_more() && more()->num_wrlock > 0;
}
int get_num_wrlocks() const {
return have_more() ? more()->num_wrlock : 0;
}
// xlock
void get_xlock(MutationRef who, client_t client) {
ceph_assert(get_xlock_by() == MutationRef());
ceph_assert(state == LOCK_XLOCK || is_locallock() ||
state == LOCK_LOCK /* if we are a peer */);
parent->get(MDSCacheObject::PIN_LOCK);
more()->num_xlock++;
more()->xlock_by = who;
more()->xlock_by_client = client;
}
void set_xlock_done() {
ceph_assert(more()->xlock_by);
ceph_assert(state == LOCK_XLOCK || is_locallock() ||
state == LOCK_LOCK /* if we are a peer */);
if (!is_locallock())
state = LOCK_XLOCKDONE;
more()->xlock_by.reset();
}
void put_xlock() {
ceph_assert(state == LOCK_XLOCK || state == LOCK_XLOCKDONE ||
state == LOCK_XLOCKSNAP || state == LOCK_LOCK_XLOCK ||
state == LOCK_LOCK || /* if we are a leader of a peer */
is_locallock());
--more()->num_xlock;
parent->put(MDSCacheObject::PIN_LOCK);
if (more()->num_xlock == 0) {
more()->xlock_by.reset();
more()->xlock_by_client = -1;
try_clear_more();
}
}
bool is_xlocked() const {
return have_more() && more()->num_xlock > 0;
}
int get_num_xlocks() const {
return have_more() ? more()->num_xlock : 0;
}
client_t get_xlock_by_client() const {
return have_more() ? more()->xlock_by_client : -1;
}
bool is_xlocked_by_client(client_t c) const {
return have_more() ? more()->xlock_by_client == c : false;
}
MutationRef get_xlock_by() const {
return have_more() ? more()->xlock_by : MutationRef();
}
// lease
bool is_leased() const {
return state_flags & LEASED;
}
void get_client_lease() {
ceph_assert(!is_leased());
state_flags |= LEASED;
}
void put_client_lease() {
ceph_assert(is_leased());
state_flags &= ~LEASED;
}
bool needs_recover() const {
return state_flags & NEED_RECOVER;
}
void mark_need_recover() {
state_flags |= NEED_RECOVER;
}
void clear_need_recover() {
state_flags &= ~NEED_RECOVER;
}
// encode/decode
void encode(ceph::buffer::list& bl) const {
ENCODE_START(2, 2, bl);
encode(state, bl);
if (have_more())
encode(more()->gather_set, bl);
else
encode(empty_gather_set, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& p) {
DECODE_START(2, p);
decode(state, p);
std::set<__s32> g;
decode(g, p);
if (!g.empty())
more()->gather_set.swap(g);
DECODE_FINISH(p);
}
void encode_state_for_replica(ceph::buffer::list& bl) const {
__s16 s = get_replica_state();
using ceph::encode;
encode(s, bl);
}
void decode_state(ceph::buffer::list::const_iterator& p, bool is_new=true) {
using ceph::decode;
__s16 s;
decode(s, p);
if (is_new)
state = s;
}
void decode_state_rejoin(ceph::buffer::list::const_iterator& p, MDSContext::vec& waiters, bool survivor) {
__s16 s;
using ceph::decode;
decode(s, p);
set_state_rejoin(s, waiters, survivor);
}
// caps
bool is_loner_mode() const {
return get_sm()->states[state].loner;
}
int gcaps_allowed_ever() const {
return parent->is_auth() ? get_sm()->allowed_ever_auth : get_sm()->allowed_ever_replica;
}
int gcaps_allowed(int who, int s=-1) const {
if (s < 0) s = state;
if (parent->is_auth()) {
if (get_xlock_by_client() >= 0 && who == CAP_XLOCKER)
return get_sm()->states[s].xlocker_caps | get_sm()->states[s].caps; // xlocker always gets more
else if (is_loner_mode() && who == CAP_ANY)
return get_sm()->states[s].caps;
else
return get_sm()->states[s].loner_caps | get_sm()->states[s].caps; // loner always gets more
} else
return get_sm()->states[s].replica_caps;
}
int gcaps_careful() const {
if (get_num_wrlocks())
return get_sm()->careful;
return 0;
}
int gcaps_xlocker_mask(client_t client) const {
if (client == get_xlock_by_client())
return type->type == CEPH_LOCK_IFILE ? 0xf : (CEPH_CAP_GSHARED|CEPH_CAP_GEXCL);
return 0;
}
// simplelock specifics
int get_replica_state() const {
return get_sm()->states[state].replica_state;
}
void export_twiddle() {
clear_gather();
state = get_replica_state();
}
bool remove_replica(int from) {
if (is_gathering(from)) {
remove_gather(from);
if (!is_gathering())
return true;
}
return false;
}
bool do_import(int from, int to) {
if (!is_stable()) {
remove_gather(from);
remove_gather(to);
if (!is_gathering())
return true;
}
if (!is_stable() && !is_gathering())
return true;
return false;
}
void _print(std::ostream& out) const {
out << get_lock_type_name(get_type()) << " ";
out << get_state_name(get_state());
if (!get_gather_set().empty())
out << " g=" << get_gather_set();
if (is_leased())
out << " l";
if (is_rdlocked())
out << " r=" << get_num_rdlocks();
if (is_wrlocked())
out << " w=" << get_num_wrlocks();
if (is_xlocked()) {
out << " x=" << get_num_xlocks();
if (get_xlock_by())
out << " by " << get_xlock_by();
}
/*if (is_stable())
out << " stable";
else
out << " unstable";
*/
}
/**
* Write bare values (caller must be in an object section)
* to formatter, or nothing if is_sync_and_unlocked.
*/
void dump(ceph::Formatter *f) const;
virtual void print(std::ostream& out) const {
out << "(";
_print(out);
out << ")";
}
LockType *type;
protected:
// parent (what i lock)
MDSCacheObject *parent;
// lock state
__s16 state = LOCK_SYNC;
__s16 state_flags = 0;
enum {
LEASED = 1 << 0,
NEED_RECOVER = 1 << 1,
CACHED = 1 << 2,
};
private:
// XXX not in mempool
struct unstable_bits_t {
unstable_bits_t();
bool empty() {
return
gather_set.empty() &&
num_wrlock == 0 &&
num_xlock == 0 &&
xlock_by.get() == NULL &&
xlock_by_client == -1 &&
excl_client == -1 &&
lock_caches.empty();
}
std::set<__s32> gather_set; // auth+rep. >= 0 is mds, < 0 is client
// local state
int num_wrlock = 0, num_xlock = 0;
MutationRef xlock_by;
client_t xlock_by_client = -1;
client_t excl_client = -1;
elist<MDLockCacheItem*> lock_caches;
};
bool have_more() const { return _unstable ? true : false; }
unstable_bits_t *more() const {
if (!_unstable)
_unstable.reset(new unstable_bits_t);
return _unstable.get();
}
void try_clear_more() {
if (_unstable && _unstable->empty()) {
_unstable.reset();
}
}
int num_rdlock = 0;
mutable std::unique_ptr<unstable_bits_t> _unstable;
};
WRITE_CLASS_ENCODER(SimpleLock)
// Stream insertion delegates to the lock's virtual print(), so derived
// lock types render their extra state too.
inline std::ostream& operator<<(std::ostream& out, const SimpleLock& l)
{
  l.print(out);
  return out;
}
#endif
| 18,874 | 27.255988 | 108 | h |
null | ceph-main/src/mds/SnapClient.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_SNAPCLIENT_H
#define CEPH_SNAPCLIENT_H
#include <string_view>
#include "MDSTableClient.h"
#include "snap.h"
#include "MDSContext.h"
class MDSRank;
class LogSegment;
/**
 * Client side of the snap table (TABLE_SNAP).  Sends prepare/commit
 * requests to the table server and keeps a locally cached copy of the
 * table (snaps, last created/destroyed ids) plus updates that have been
 * prepared but not yet committed.
 */
class SnapClient : public MDSTableClient {
public:
  explicit SnapClient(MDSRank *m) :
    MDSTableClient(m, TABLE_SNAP) {}
  void resend_queries() override;
  void handle_query_result(const cref_t<MMDSTableRequest> &m) override;
  void handle_notify_prep(const cref_t<MMDSTableRequest> &m) override;
  void notify_commit(version_t tid) override;
  // Prepare creation of a snapshot `name` of dir `dirino`.
  // NOTE: the encode order below is the wire format the table server
  // decodes; do not reorder the encode() calls.
  void prepare_create(inodeno_t dirino, std::string_view name, utime_t stamp,
		      version_t *pstid, bufferlist *pbl, MDSContext *onfinish) {
    bufferlist bl;
    __u32 op = TABLE_OP_CREATE;
    encode(op, bl);
    encode(dirino, bl);
    encode(name, bl);
    encode(stamp, bl);
    _prepare(bl, pstid, pbl, onfinish);
  }
  // Prepare realm creation for `ino`; same CREATE op but with only the
  // inode encoded (no name/stamp), which is how the server tells the
  // two apart.
  void prepare_create_realm(inodeno_t ino, version_t *pstid, bufferlist *pbl, MDSContext *onfinish) {
    bufferlist bl;
    __u32 op = TABLE_OP_CREATE;
    encode(op, bl);
    encode(ino, bl);
    _prepare(bl, pstid, pbl, onfinish);
  }
  // Prepare destruction of snapshot `snapid` under `ino`.
  void prepare_destroy(inodeno_t ino, snapid_t snapid, version_t *pstid, bufferlist *pbl, MDSContext *onfinish) {
    bufferlist bl;
    __u32 op = TABLE_OP_DESTROY;
    encode(op, bl);
    encode(ino, bl);
    encode(snapid, bl);
    _prepare(bl, pstid, pbl, onfinish);
  }
  // Prepare rename/update of snapshot `snapid` (new name/stamp).
  void prepare_update(inodeno_t ino, snapid_t snapid, std::string_view name, utime_t stamp,
		      version_t *pstid, MDSContext *onfinish) {
    bufferlist bl;
    __u32 op = TABLE_OP_UPDATE;
    encode(op, bl);
    encode(ino, bl);
    encode(snapid, bl);
    encode(name, bl);
    encode(stamp, bl);
    _prepare(bl, pstid, NULL, onfinish);
  }
  version_t get_cached_version() const { return cached_version; }
  // Refresh the local cache up to at least version `want`.
  void refresh(version_t want, MDSContext *onfinish);
  void sync(MDSContext *onfinish);
  bool is_synced() const { return synced; }
  // Queue a waiter to fire once the cache reaches at least the current
  // version (minimum version 1); only valid while not yet synced.
  void wait_for_sync(MDSContext *c) {
    ceph_assert(!synced);
    waiting_for_version[std::max<version_t>(cached_version, 1)].push_back(c);
  }
  snapid_t get_last_created() const { return cached_last_created; }
  snapid_t get_last_destroyed() const { return cached_last_destroyed; }
  // Last snap sequence = newest of create/destroy ids.
  snapid_t get_last_seq() const { return std::max(cached_last_destroyed, cached_last_created); }
  void get_snaps(std::set<snapid_t>& snaps) const;
  // Return the subset of `snaps` that still exist in the cached table.
  std::set<snapid_t> filter(const std::set<snapid_t>& snaps) const;
  const SnapInfo* get_snap_info(snapid_t snapid) const;
  void get_snap_infos(std::map<snapid_t, const SnapInfo*>& infomap, const std::set<snapid_t>& snaps) const;
  int dump_cache(Formatter *f) const;
private:
  version_t cached_version = 0;                 // table version our cache reflects
  snapid_t cached_last_created = 0, cached_last_destroyed = 0;
  std::map<snapid_t, SnapInfo> cached_snaps;    // committed snapshots
  std::map<version_t, SnapInfo> cached_pending_update;   // prepared-but-uncommitted create/update, by tid
  std::map<version_t, std::pair<snapid_t,snapid_t> > cached_pending_destroy;  // prepared destroys, by tid
  std::set<version_t> committing_tids;          // tids with commit in flight
  std::map<version_t, MDSContext::vec > waiting_for_version;  // waiters keyed by wanted version
  uint64_t sync_reqid = 0;
  bool synced = false;
};
#endif
| 3,507 | 29.77193 | 113 | h |
null | ceph-main/src/mds/SnapRealm.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_SNAPREALM_H
#define CEPH_MDS_SNAPREALM_H
#include <string_view>
#include "mdstypes.h"
#include "snap.h"
#include "include/xlist.h"
#include "include/elist.h"
#include "common/snap_types.h"
#include "MDSContext.h"
/**
 * In-memory snapshot realm attached to a CInode.  The persistent part is
 * `srnode`; everything derived from it and from past/present parent
 * realms (snap set, snap context, snap trace) is cached lazily and
 * rebuilt by check_cache() after invalidation.
 */
struct SnapRealm {
public:
  SnapRealm(MDCache *c, CInode *in);
  // Linear scan of this realm's own snaps for a name match (does not
  // consult parent realms).
  bool exists(std::string_view name) const {
    for (auto p = srnode.snaps.begin(); p != srnode.snaps.end(); ++p) {
      if (p->second.name == name)
	return true;
    }
    return false;
  }
  void prune_past_parent_snaps();
  bool has_past_parent_snaps() const {
    return !srnode.past_parent_snaps.empty();
  }
  void build_snap_set() const;
  void get_snap_info(std::map<snapid_t, const SnapInfo*>& infomap, snapid_t first=0, snapid_t last=CEPH_NOSNAP);
  const ceph::buffer::list& get_snap_trace() const;
  const ceph::buffer::list& get_snap_trace_new() const;
  void build_snap_trace() const;
  std::string_view get_snapname(snapid_t snapid, inodeno_t atino);
  snapid_t resolve_snapname(std::string_view name, inodeno_t atino, snapid_t first=0, snapid_t last=CEPH_NOSNAP);
  const std::set<snapid_t>& get_snaps() const;
  const SnapContext& get_snap_context() const;
  // Forces check_cache() to rebuild all cached state on next access
  // (cached_seq == 0 is the "dirty" sentinel).
  void invalidate_cached_snaps() {
    cached_seq = 0;
  }
  snapid_t get_last_created() {
    check_cache();
    return cached_last_created;
  }
  snapid_t get_last_destroyed() {
    check_cache();
    return cached_last_destroyed;
  }
  // Highest existing snap id, or 0 when there are none.
  snapid_t get_newest_snap() {
    check_cache();
    if (cached_snaps.empty())
      return 0;
    else
      return *cached_snaps.rbegin();
  }
  snapid_t get_newest_seq() {
    check_cache();
    return cached_seq;
  }
  // First snap id strictly greater than `follows`, or CEPH_NOSNAP.
  snapid_t get_snap_following(snapid_t follows) {
    check_cache();
    const std::set<snapid_t>& s = get_snaps();
    auto p = s.upper_bound(follows);
    if (p != s.end())
      return *p;
    return CEPH_NOSNAP;
  }
  // Any snap id in the inclusive range [first, last]?
  bool has_snaps_in_range(snapid_t first, snapid_t last) {
    check_cache();
    const auto& s = get_snaps();
    auto p = s.lower_bound(first);
    return (p != s.end() && *p <= last);
  }
  inodeno_t get_subvolume_ino() {
    check_cache();
    return cached_subvolume_ino;
  }
  void adjust_parent();
  void split_at(SnapRealm *child);
  void merge_to(SnapRealm *newparent);
  // Track a client capability under this realm; the per-client xlist is
  // allocated on first use.
  void add_cap(client_t client, Capability *cap) {
    auto client_caps_entry = client_caps.find(client);
    if (client_caps_entry == client_caps.end())
      client_caps_entry = client_caps.emplace(client,
					      new xlist<Capability*>).first;
    client_caps_entry->second->push_back(&cap->item_snaprealm_caps);
  }
  // Remove a capability; frees the per-client list once it is empty.
  void remove_cap(client_t client, Capability *cap) {
    cap->item_snaprealm_caps.remove_myself();
    auto found = client_caps.find(client);
    if (found != client_caps.end() && found->second->empty()) {
      delete found->second;
      client_caps.erase(found);
    }
  }
  // realm state
  sr_t srnode;
  // in-memory state
  MDCache *mdcache;
  CInode *inode;
  SnapRealm *parent = nullptr;
  std::set<SnapRealm*> open_children;    // active children that are currently open
  elist<CInode*> inodes_with_caps;             // for efficient realm splits
  std::map<client_t, xlist<Capability*>* > client_caps;   // to identify clients who need snap notifications
protected:
  // Rebuilds the cached_* members below when they are stale.
  void check_cache() const;
private:
  bool global;
  // cache
  mutable snapid_t cached_seq;           // max seq over self and all past+present parents.
  mutable snapid_t cached_last_created;  // max last_created over all past+present parents
  mutable snapid_t cached_last_destroyed;
  mutable std::set<snapid_t> cached_snaps;
  mutable SnapContext cached_snap_context;
  mutable ceph::buffer::list cached_snap_trace;
  mutable ceph::buffer::list cached_snap_trace_new;
  mutable inodeno_t cached_subvolume_ino = 0;
};
std::ostream& operator<<(std::ostream& out, const SnapRealm &realm);
#endif
| 4,281 | 26.986928 | 113 | h |
null | ceph-main/src/mds/SnapServer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_SNAPSERVER_H
#define CEPH_SNAPSERVER_H
#include "MDSTableServer.h"
#include "snap.h"
#include "messages/MRemoveSnaps.h"
class MDSRank;
class MonClient;
/**
 * Server side of the snap table (TABLE_SNAP).  Owns the authoritative
 * snapshot list plus the sets of prepared-but-uncommitted updates,
 * destroys and no-ops, and tracks snaps that still need purging from
 * data pools.
 */
class SnapServer : public MDSTableServer {
public:
  SnapServer(MDSRank *m, MonClient *monc)
    : MDSTableServer(m, TABLE_SNAP), mon_client(monc) {}
  SnapServer() : MDSTableServer(NULL, TABLE_SNAP) {}
  void handle_remove_snaps(const cref_t<MRemoveSnaps> &m);
  void reset_state() override;
  // Migrate state from a pre-v2-snaprealm filesystem; returns true if
  // anything changed.
  bool upgrade_format() {
    // upgraded from old filesystem
    ceph_assert(is_active());
    ceph_assert(last_snap > 0);
    bool upgraded = false;
    if (get_version() == 0) {
      // version 0 confuses snapclient code
      reset();
      upgraded = true;
    }
    if (snaprealm_v2_since == CEPH_NOSNAP) {
      // new snapshots will have new format snaprealms
      snaprealm_v2_since = last_snap + 1;
      upgraded = true;
    }
    return upgraded;
  }
  void check_osd_map(bool force);
  // Multi-mds snapshots require every existing snap to be v2-format.
  bool can_allow_multimds_snaps() const {
    return snaps.empty() || snaps.begin()->first >= snaprealm_v2_since;
  }
  void encode(bufferlist& bl) const {
    encode_server_state(bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    decode_server_state(bl);
  }
  void dump(Formatter *f) const;
  static void generate_test_instances(std::list<SnapServer*>& ls);
  bool force_update(snapid_t last, snapid_t v2_since,
                    std::map<snapid_t, SnapInfo>& _snaps);
protected:
  // Serialize full server state (struct version 5; see decode for the
  // compat handling of older versions).
  void encode_server_state(bufferlist& bl) const override {
    ENCODE_START(5, 3, bl);
    encode(last_snap, bl);
    encode(snaps, bl);
    encode(need_to_purge, bl);
    encode(pending_update, bl);
    encode(pending_destroy, bl);
    encode(pending_noop, bl);
    encode(last_created, bl);
    encode(last_destroyed, bl);
    encode(snaprealm_v2_since, bl);
    ENCODE_FINISH(bl);
  }
  void decode_server_state(bufferlist::const_iterator& bl) override {
    DECODE_START_LEGACY_COMPAT_LEN(5, 3, 3, bl);
    decode(last_snap, bl);
    decode(snaps, bl);
    decode(need_to_purge, bl);
    decode(pending_update, bl);
    if (struct_v >= 2)
      decode(pending_destroy, bl);
    else {
      // v1 stored only the removed snapid; synthesize the pair form
      // (seq is left default-constructed).
      std::map<version_t, snapid_t> t;
      decode(t, bl);
      for (auto& [ver, snapid] : t) {
        pending_destroy[ver].first = snapid;
      }
    }
    decode(pending_noop, bl);
    if (struct_v >= 4) {
      decode(last_created, bl);
      decode(last_destroyed, bl);
    } else {
      // Older encodings tracked only last_snap.
      last_created = last_snap;
      last_destroyed = last_snap;
    }
    if (struct_v >= 5)
      decode(snaprealm_v2_since, bl);
    else
      snaprealm_v2_since = CEPH_NOSNAP;  // marks a pre-v2 filesystem; see upgrade_format()
    DECODE_FINISH(bl);
  }
  // server bits
  void _prepare(const bufferlist &bl, uint64_t reqid, mds_rank_t bymds, bufferlist &out) override;
  void _get_reply_buffer(version_t tid, bufferlist *pbl) const override;
  void _commit(version_t tid, cref_t<MMDSTableRequest> req) override;
  void _rollback(version_t tid) override;
  void _server_update(bufferlist& bl) override;
  bool _notify_prep(version_t tid) override;
  void handle_query(const cref_t<MMDSTableRequest> &m) override;
  MonClient *mon_client = nullptr;
  snapid_t last_snap = 0;
  snapid_t last_created, last_destroyed;
  snapid_t snaprealm_v2_since;                 // first snapid with v2-format realms
  std::map<snapid_t, SnapInfo> snaps;          // committed snapshots
  std::map<int, std::set<snapid_t> > need_to_purge;   // pool -> snaps awaiting OSD purge
  std::map<version_t, SnapInfo> pending_update;       // prepared create/update, by tid
  std::map<version_t, std::pair<snapid_t,snapid_t> > pending_destroy; // (removed_snap, seq)
  std::set<version_t> pending_noop;
  version_t last_checked_osdmap = 0;           // epoch last inspected by check_osd_map()
};
WRITE_CLASS_ENCODER(SnapServer)
#endif
| 4,006 | 27.020979 | 98 | h |
null | ceph-main/src/mds/StrayManager.h | // vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef STRAY_MANAGER_H
#define STRAY_MANAGER_H
#include "include/common_fwd.h"
#include "include/elist.h"
#include <list>
#include "Mutation.h"
#include "PurgeQueue.h"
class MDSRank;
class CInode;
class CDentry;
/**
 * Manages the lifecycle of stray dentries (unlinked files awaiting
 * purge, reintegration or migration).  Evaluation decides among purge,
 * truncate, reintegrate and migrate; actual data deletion is delegated
 * to the PurgeQueue.
 */
class StrayManager
{
  // My public interface is for consumption by MDCache
  public:
  explicit StrayManager(MDSRank *mds, PurgeQueue &purge_queue_);
  // Attach perf counters used for stray statistics.
  void set_logger(PerfCounters *l) {logger = l;}
  // Begin processing; before this, evaluation work is deferred.
  void activate();
  // Evaluate a stray dentry for purge/reintegration; returns true if the
  // dentry will be purged (see _eval_stray).
  bool eval_stray(CDentry *dn);
  // Stray-count accounting (the count includes strays not in cache).
  void set_num_strays(uint64_t num);
  uint64_t get_num_strays() const { return num_strays; }
  /**
   * Queue dentry for later evaluation. (evaluate it while not in the
   * middle of another metadata operation)
   */
  void queue_delayed(CDentry *dn);
  /**
   * Eval strays in the delayed_eval_stray list
   */
  void advance_delayed();
  /**
   * Remote dentry potentially points to a stray. When it is touched,
   * call in here to evaluate it for migration (move a stray residing
   * on another MDS to this MDS) or reintegration (move a stray dentry's
   * inode into a non-stray hardlink dentry and clean up the stray).
   *
   * @param stray_dn a stray dentry whose inode has been referenced
   * by a remote dentry
   * @param remote_dn (optional) which remote dentry was touched
   * in an operation that led us here: this is used
   * as a hint for which remote to reintegrate into
   * if there are multiple remotes.
   */
  void eval_remote(CDentry *remote_dn);
  /**
   * Given a dentry within one of my stray directories,
   * send it off to a stray directory in another MDS.
   *
   * This is for use:
   * * Case A: when shutting down a rank, we migrate strays
   * away from ourselves rather than waiting for purge
   * * Case B: when a client request has a trace that refers to
   * a stray inode on another MDS, we migrate that inode from
   * there to here, in order that we can later re-integrate it
   * here.
   *
   * In case B, the receiver should be calling into eval_stray
   * on completion of mv (i.e. inode put), resulting in a subsequent
   * reintegration.
   */
  void migrate_stray(CDentry *dn, mds_rank_t dest);
  /**
   * Update stats to reflect a newly created stray dentry. Needed
   * because stats on strays live here, but creation happens
   * in Server or MDCache. For our purposes "creation" includes
   * loading a stray from a dirfrag and migrating a stray from
   * another MDS, in addition to creations per-se.
   */
  void notify_stray_created();
  /**
   * Update stats to reflect a removed stray dentry. Needed because
   * stats on strays live here, but removal happens in Server or
   * MDCache. Also includes migration (rename) of strays from
   * this MDS to another MDS.
   */
  void notify_stray_removed();
  protected:
  friend class StrayManagerIOContext;
  friend class StrayManagerLogContext;
  friend class StrayManagerContext;
  friend class C_StraysFetched;
  friend class C_RetryEnqueue;
  friend class C_PurgeStrayLogged;
  friend class C_TruncateStrayLogged;
  friend class C_IO_PurgeStrayPurged;
  // Zero a stray inode's data instead of purging the whole inode
  // (used when snapshots still reference it — TODO confirm).
  void truncate(CDentry *dn);
  /**
   * Purge a dentry from a stray directory. This function
   * is called once eval_stray is satisfied and StrayManager
   * throttling is also satisfied. There is no going back
   * at this stage!
   */
  void purge(CDentry *dn);
  /**
   * Completion handler for a Filer::purge on a stray inode.
   */
  void _purge_stray_purged(CDentry *dn, bool only_head);
  void _purge_stray_logged(CDentry *dn, version_t pdv, MutationRef& mut);
  /**
   * Callback: we have logged the update to an inode's metadata
   * reflecting it's newly-zeroed length.
   */
  void _truncate_stray_logged(CDentry *dn, MutationRef &mut);
  /**
   * Call this on a dentry that has been identified as
   * eligible for purging. It will be passed on to PurgeQueue.
   */
  void enqueue(CDentry *dn, bool trunc);
  /**
   * Final part of enqueue() which we may have to retry
   * after opening snap parents.
   */
  void _enqueue(CDentry *dn, bool trunc);
  /**
   * When hard links exist to an inode whose primary dentry
   * is unlinked, the inode gets a stray primary dentry.
   *
   * We may later "reintegrate" the inode into a remaining
   * non-stray dentry (one of what was previously a remote
   * dentry) by issuing a rename from the stray to the other
   * dentry.
   */
  void reintegrate_stray(CDentry *dn, CDentry *rlink);
  /**
   * Evaluate a stray dentry for purging or reintegration.
   *
   * purging: If the inode has no linkage, and no more references, then
   * we may decide to purge it.
   *
   * reintegration: If the inode still has linkage, then it means someone else
   * (a hard link) is still referring to it, and we should
   * think about reintegrating that inode into the remote dentry.
   *
   * @returns true if the dentry will be purged (caller should never
   * take more refs after this happens), else false.
   */
  bool _eval_stray(CDentry *dn);
  void _eval_stray_remote(CDentry *stray_dn, CDentry *remote_dn);
  // Has passed through eval_stray and still has refs
  elist<CDentry*> delayed_eval_stray;
  // strays that have been trimmed from cache
  std::set<std::string> trimmed_strays;
  // Global references for doing I/O
  MDSRank *mds;
  PerfCounters *logger = nullptr;   // see set_logger()
  bool started = false;             // set by activate()
  // Stray dentries for this rank (including those not in cache)
  uint64_t num_strays = 0;
  // Stray dentries
  uint64_t num_strays_delayed = 0;
  /**
   * Entries that have entered enqueue() but not been persistently
   * recorded by PurgeQueue yet
   */
  uint64_t num_strays_enqueuing = 0;
  PurgeQueue &purge_queue;
};
#endif // STRAY_MANAGER_H
| 6,113 | 29.723618 | 80 | h |
null | ceph-main/src/mds/cephfs_features.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPHFS_FEATURES_H
#define CEPHFS_FEATURES_H
#include "include/cephfs/metrics/Types.h"
class feature_bitset_t;
namespace ceph {
class Formatter;
}
// When adding a new release, please update the "current" release below, add a
// feature bit for that release, add that feature bit to CEPHFS_FEATURES_ALL,
// and update Server::update_required_client_features(). This feature bit
// is used to indicate that operator only wants clients from that release or
// later to mount CephFS.
#define CEPHFS_CURRENT_RELEASE CEPH_RELEASE_REEF
// The first 5 bits are reserved for old ceph releases.
#define CEPHFS_FEATURE_JEWEL 5
#define CEPHFS_FEATURE_KRAKEN 6
#define CEPHFS_FEATURE_LUMINOUS 7
#define CEPHFS_FEATURE_MIMIC 8
#define CEPHFS_FEATURE_REPLY_ENCODING 9
#define CEPHFS_FEATURE_RECLAIM_CLIENT 10
#define CEPHFS_FEATURE_LAZY_CAP_WANTED 11
#define CEPHFS_FEATURE_MULTI_RECONNECT 12
#define CEPHFS_FEATURE_NAUTILUS 12
#define CEPHFS_FEATURE_DELEG_INO 13
#define CEPHFS_FEATURE_OCTOPUS 13
#define CEPHFS_FEATURE_METRIC_COLLECT 14
#define CEPHFS_FEATURE_ALTERNATE_NAME 15
#define CEPHFS_FEATURE_NOTIFY_SESSION_STATE 16
#define CEPHFS_FEATURE_OP_GETVXATTR 17
#define CEPHFS_FEATURE_32BITS_RETRY_FWD 18
#define CEPHFS_FEATURE_NEW_SNAPREALM_INFO 19
#define CEPHFS_FEATURE_MAX 19
#define CEPHFS_FEATURES_ALL { \
0, 1, 2, 3, 4, \
CEPHFS_FEATURE_JEWEL, \
CEPHFS_FEATURE_KRAKEN, \
CEPHFS_FEATURE_LUMINOUS, \
CEPHFS_FEATURE_MIMIC, \
CEPHFS_FEATURE_REPLY_ENCODING, \
CEPHFS_FEATURE_RECLAIM_CLIENT, \
CEPHFS_FEATURE_LAZY_CAP_WANTED, \
CEPHFS_FEATURE_MULTI_RECONNECT, \
CEPHFS_FEATURE_NAUTILUS, \
CEPHFS_FEATURE_DELEG_INO, \
CEPHFS_FEATURE_OCTOPUS, \
CEPHFS_FEATURE_METRIC_COLLECT, \
CEPHFS_FEATURE_ALTERNATE_NAME, \
CEPHFS_FEATURE_NOTIFY_SESSION_STATE, \
CEPHFS_FEATURE_OP_GETVXATTR, \
CEPHFS_FEATURE_32BITS_RETRY_FWD, \
CEPHFS_FEATURE_NEW_SNAPREALM_INFO \
}
#define CEPHFS_METRIC_FEATURES_ALL { \
CLIENT_METRIC_TYPE_CAP_INFO, \
CLIENT_METRIC_TYPE_READ_LATENCY, \
CLIENT_METRIC_TYPE_WRITE_LATENCY, \
CLIENT_METRIC_TYPE_METADATA_LATENCY, \
CLIENT_METRIC_TYPE_DENTRY_LEASE, \
CLIENT_METRIC_TYPE_OPENED_FILES, \
CLIENT_METRIC_TYPE_PINNED_ICAPS, \
CLIENT_METRIC_TYPE_OPENED_INODES, \
CLIENT_METRIC_TYPE_READ_IO_SIZES, \
CLIENT_METRIC_TYPE_WRITE_IO_SIZES, \
CLIENT_METRIC_TYPE_AVG_READ_LATENCY, \
CLIENT_METRIC_TYPE_STDEV_READ_LATENCY, \
CLIENT_METRIC_TYPE_AVG_WRITE_LATENCY, \
CLIENT_METRIC_TYPE_STDEV_WRITE_LATENCY, \
CLIENT_METRIC_TYPE_AVG_METADATA_LATENCY, \
CLIENT_METRIC_TYPE_STDEV_METADATA_LATENCY, \
}
#define CEPHFS_FEATURES_MDS_SUPPORTED CEPHFS_FEATURES_ALL
#define CEPHFS_FEATURES_CLIENT_SUPPORTED CEPHFS_FEATURES_ALL
extern std::string_view cephfs_feature_name(size_t id);
extern int cephfs_feature_from_name(std::string_view name);
std::string cephfs_stringify_features(const feature_bitset_t& features);
void cephfs_dump_features(ceph::Formatter *f, const feature_bitset_t& features);
#endif
| 3,629 | 34.940594 | 80 | h |
null | ceph-main/src/mds/flock.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_MDS_FLOCK_H
#define CEPH_MDS_FLOCK_H
#include <errno.h>
#include "common/debug.h"
#include "mdstypes.h"
/// Render a ceph_filelock in human-readable, newline-terminated form.
/// Used by the lock-state debug output below.
inline std::ostream& operator<<(std::ostream& out, const ceph_filelock& l) {
  // Use '\n' rather than std::endl: the flush std::endl performs is
  // unnecessary for debug formatting and slows high-volume logging
  // (clang-tidy: performance-avoid-endl).
  out << "start: " << l.start << ", length: " << l.length
      << ", client: " << l.client << ", owner: " << l.owner
      << ", pid: " << l.pid << ", type: " << (int)l.type
      << '\n';
  return out;
}
/// Do two locks belong to the same owner?
inline bool ceph_filelock_owner_equal(const ceph_filelock& l, const ceph_filelock& r)
{
  if (l.client != r.client)
    return false;
  if (l.owner != r.owner)
    return false;
  // New-style clients set the most significant bit of 'owner'; for
  // those, client+owner alone identify the lock owner.  Old clients
  // leave the bit clear and additionally rely on 'pid'.
  const bool new_style_owner = (l.owner & (1ULL << 63)) != 0;
  return new_style_owner || l.pid == r.pid;
}
/// Three-way comparison of lock ownership: client, then owner, then
/// (for old-style clients only) pid.  Mirrors ceph_filelock_owner_equal.
inline int ceph_filelock_owner_compare(const ceph_filelock& l, const ceph_filelock& r)
{
  if (l.client != r.client)
    return (l.client > r.client) ? 1 : -1;
  if (l.owner != r.owner)
    return (l.owner > r.owner) ? 1 : -1;
  // New-style owner (MSB set): fully identified by client+owner.
  if (l.owner & (1ULL << 63))
    return 0;
  if (l.pid == r.pid)
    return 0;
  return (l.pid > r.pid) ? 1 : -1;
}
/// Total order over file locks: by owner first, then by range
/// (start, length) and lock type.
inline int ceph_filelock_compare(const ceph_filelock& l, const ceph_filelock& r)
{
  const int owner_cmp = ceph_filelock_owner_compare(l, r);
  if (owner_cmp != 0)
    return owner_cmp;
  if (l.start != r.start)
    return (l.start > r.start) ? 1 : -1;
  if (l.length != r.length)
    return (l.length > r.length) ? 1 : -1;
  if (l.type != r.type)
    return (l.type > r.type) ? 1 : -1;
  return 0;
}
// Relational operators defined in terms of the total order implemented
// by ceph_filelock_compare().
inline bool operator<(const ceph_filelock& l, const ceph_filelock& r)
{
  return ceph_filelock_compare(l, r) < 0;
}

inline bool operator==(const ceph_filelock& l, const ceph_filelock& r) {
  return 0 == ceph_filelock_compare(l, r);
}

inline bool operator!=(const ceph_filelock& l, const ceph_filelock& r) {
  return !(l == r);
}
/**
 * Byte-range file-lock state for a single inode: the currently held
 * locks, the locks blocked waiting on them, and per-client counts of
 * each.  'type' selects the lock flavour this instance tracks
 * (NOTE(review): callers presumably pass CEPH_LOCK_FLOCK/CEPH_LOCK_FCNTL
 * -- confirm against flock.cc).
 */
class ceph_lock_state_t {
public:
  explicit ceph_lock_state_t(CephContext *cct_, int type_) : cct(cct_), type(type_) {}
  ~ceph_lock_state_t();
  /**
   * Check if a lock is on the waiting_locks list.
   *
   * @param fl The filelock to check for
   * @returns True if the lock is waiting, false otherwise
   */
  bool is_waiting(const ceph_filelock &fl) const;
  /**
   * Remove a lock from the waiting_locks list
   *
   * @param fl The filelock to remove
   */
  void remove_waiting(const ceph_filelock& fl);
  /*
   * Try to set a new lock. If it's blocked and wait_on_fail is true,
   * add the lock to waiting_locks.
   * The lock needs to be of type CEPH_LOCK_EXCL or CEPH_LOCK_SHARED.
   * This may merge previous locks, or convert the type of already-owned
   * locks.
   *
   * @param new_lock The lock to set
   * @param wait_on_fail whether to wait until the lock can be set.
   *                     Otherwise it fails immediately when blocked.
   * @param replay true if this is a journal replay (see flock.cc)
   * @param deadlock output: set if adding the lock would deadlock
   *
   * @returns true if set, false if not set.
   */
  bool add_lock(ceph_filelock& new_lock, bool wait_on_fail, bool replay,
                bool *deadlock);
  /**
   * See if a lock is blocked by existing locks. If the lock is blocked,
   * it will be set to the value of the first blocking lock. Otherwise,
   * it will be returned unchanged, except for setting the type field
   * to CEPH_LOCK_UNLOCK.
   *
   * @param testing_lock The lock to check for conflicts on.
   */
  void look_for_lock(ceph_filelock& testing_lock);
  /*
   * Remove lock(s) described in old_lock. This may involve splitting a
   * previous lock or making a previous lock smaller.
   *
   * @param removal_lock The lock to remove
   * @param activated_locks A return parameter, holding activated wait locks.
   */
  void remove_lock(const ceph_filelock removal_lock,
                   std::list<ceph_filelock>& activated_locks);
  bool remove_all_from(client_t client);
  // NOTE: only the held locks and their per-client counts are
  // serialized; waiters are in-memory state and are not persisted.
  void encode(ceph::bufferlist& bl) const {
    using ceph::encode;
    encode(held_locks, bl);
    encode(client_held_lock_counts, bl);
  }
  void decode(ceph::bufferlist::const_iterator& bl) {
    using ceph::decode;
    decode(held_locks, bl);
    decode(client_held_lock_counts, bl);
  }
  // True when no locks are held or waiting and all counts are empty.
  bool empty() const {
    return held_locks.empty() && waiting_locks.empty() &&
           client_held_lock_counts.empty() &&
           client_waiting_lock_counts.empty();
  }
  std::multimap<uint64_t, ceph_filelock> held_locks;    // current locks
  std::multimap<uint64_t, ceph_filelock> waiting_locks; // locks waiting for other locks
  // both of the above are keyed by starting offset
  std::map<client_t, int> client_held_lock_counts;
  std::map<client_t, int> client_waiting_lock_counts;
private:
  // Maximum recursion depth for is_deadlock() cycle detection.
  static const unsigned MAX_DEADLK_DEPTH = 5;
  /**
   * Check if adding the lock causes deadlock
   *
   * @param fl The blocking filelock
   * @param overlapping_locks list of all overlapping locks
   * @param first_fl the lock that started the deadlock search
   * @param depth recursion call depth
   */
  bool is_deadlock(const ceph_filelock& fl,
                   std::list<std::multimap<uint64_t, ceph_filelock>::iterator>&
                     overlapping_locks,
                   const ceph_filelock *first_fl=NULL, unsigned depth=0) const;
  /**
   * Add a lock to the waiting_locks list
   *
   * @param fl The filelock to add
   */
  void add_waiting(const ceph_filelock& fl);
  /**
   * Adjust old locks owned by a single process so that process can set
   * a new lock of different type. Handle any changes needed to the old locks
   * (and the new lock) so that once the new lock is inserted into the
   * held_locks list the process has a coherent, non-fragmented set of lock
   * ranges. Make sure any overlapping locks are combined, trimmed, and removed
   * as needed.
   * This function should only be called once you know the lock will be
   * inserted, as it DOES adjust new_lock. You can call this function
   * on an empty list, in which case it does nothing.
   * This function does not remove elements from old_locks, so regard the list
   * as bad information following function invocation.
   *
   * @param new_lock The new lock the process has requested.
   * @param old_locks list of all locks currently held by same
   * client/process that overlap new_lock.
   * @param neighbor_locks locks owned by same process that neighbor new_lock on
   * left or right side.
   */
  void adjust_locks(std::list<std::multimap<uint64_t, ceph_filelock>::iterator> old_locks,
                    ceph_filelock& new_lock,
                    std::list<std::multimap<uint64_t, ceph_filelock>::iterator>
                      neighbor_locks);
  // get last lock prior to start position
  std::multimap<uint64_t, ceph_filelock>::iterator
  get_lower_bound(uint64_t start,
                  std::multimap<uint64_t, ceph_filelock>& lock_map);
  // get latest-starting lock that goes over the byte "end"
  std::multimap<uint64_t, ceph_filelock>::iterator
  get_last_before(uint64_t end,
                  std::multimap<uint64_t, ceph_filelock>& lock_map);
  /*
   * See if an iterator's lock covers any of the same bounds as a given range
   * Rules: locks cover "length" bytes from "start", so the last covered
   * byte is at start + length - 1.
   * If the length is 0, the lock covers from "start" to the end of the file.
   */
  bool share_space(std::multimap<uint64_t, ceph_filelock>::iterator& iter,
                   uint64_t start, uint64_t end);
  // Convenience overload: derive [start, end] from the lock itself.
  bool share_space(std::multimap<uint64_t, ceph_filelock>::iterator& iter,
                   const ceph_filelock &lock) {
    uint64_t end = lock.start;
    if (lock.length) {
      end += lock.length - 1;
    } else { // zero length means end of file
      end = uint64_t(-1);
    }
    return share_space(iter, lock.start, end);
  }
  /*
   * Get a list of all locks overlapping with the given lock's range.
   * lock: the lock to compare with.
   * overlaps: an empty list, to be filled.
   * self_neighbors: optionally filled with same-owner locks adjacent
   * to the range.
   * Returns: true if at least one lock overlaps.
   */
  bool get_overlapping_locks(const ceph_filelock& lock,
                             std::list<std::multimap<uint64_t,
                                 ceph_filelock>::iterator> & overlaps,
                             std::list<std::multimap<uint64_t,
                                 ceph_filelock>::iterator> *self_neighbors);
  bool get_overlapping_locks(const ceph_filelock& lock,
                             std::list<std::multimap<uint64_t, ceph_filelock>::iterator>& overlaps) {
    return get_overlapping_locks(lock, overlaps, NULL);
  }
  /**
   * Get a list of all waiting locks that overlap with the given lock's range.
   * lock: specifies the range to compare with
   * overlaps: an empty list, to be filled
   * Returns: true if at least one waiting_lock overlaps
   */
  bool get_waiting_overlaps(const ceph_filelock& lock,
                            std::list<std::multimap<uint64_t,
                                ceph_filelock>::iterator>& overlaps);
  /*
   * Split a list of locks up by whether they're owned by the same
   * process as the given lock.
   * owner: the owning lock
   * locks: the list of locks (obtained from get_overlapping_locks, probably)
   * Will have all locks owned by owner removed
   * owned_locks: an empty list, to be filled with the locks owned by owner
   */
  void split_by_owner(const ceph_filelock& owner,
                      std::list<std::multimap<uint64_t,
                          ceph_filelock>::iterator> & locks,
                      std::list<std::multimap<uint64_t,
                          ceph_filelock>::iterator> & owned_locks);
  // Return the first exclusive lock in 'locks', or NULL if none.
  ceph_filelock *contains_exclusive_lock(std::list<std::multimap<uint64_t,
                                             ceph_filelock>::iterator>& locks);
  CephContext *cct;  // ceph context supplied at construction (config/logging)
  int type;          // lock flavour this instance tracks (see class comment)
};
WRITE_CLASS_ENCODER(ceph_lock_state_t)
/// Debug dump of the whole lock state: summary counts, then every held
/// lock followed by every waiting lock.
inline std::ostream& operator<<(std::ostream &out, const ceph_lock_state_t &l) {
  out << "ceph_lock_state_t. held_locks.size()=" << l.held_locks.size()
      << ", waiting_locks.size()=" << l.waiting_locks.size()
      << ", client_held_lock_counts -- " << l.client_held_lock_counts
      << "\n client_waiting_lock_counts -- " << l.client_waiting_lock_counts
      << "\n held_locks -- ";
  for (const auto& held : l.held_locks)
    out << held.second;
  out << "\n waiting_locks -- ";
  for (const auto& waiting : l.waiting_locks)
    out << waiting.second << "\n";
  return out;
}
#endif
| 10,330 | 34.624138 | 90 | h |
null | ceph-main/src/mds/fscrypt.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPHFS_FSCRYPT_H
#define CEPHFS_FSCRYPT_H
struct ceph_fscrypt_last_block_header {
__u8 ver;
__u8 compat;
/* If the last block is located in a file hole the length
* will be sizeof(i_version + file_offset + block_size),
* or will plus to extra BLOCK SIZE.
*/
uint32_t data_len;
/* inode change attr version */
uint64_t change_attr;
/*
* For a file hole, this will be 0, or it will be the offset from
* which will write the last block
*/
uint64_t file_offset;
/* It should always be the fscrypt block size */
uint32_t block_size;
};
#endif
| 995 | 22.714286 | 70 | h |
null | ceph-main/src/mds/inode_backtrace.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_INODE_BACKTRACE_H
#define CEPH_INODE_BACKTRACE_H
#include <string_view>
#include "mdstypes.h"
namespace ceph {
class Formatter;
}
/** metadata backpointers **/
/*
* - inode_backpointer_t is just the _pointer_ portion; it doesn't
* tell us who we point _from_.
*
* - it _does_ include a version of the source object, so we can look
* at two different pointers (from the same inode) and tell which is
* newer.
*/
// One ancestor link: this inode is (or was) linked from directory
// 'dirino' under dentry name 'dname', as of 'version'.
struct inode_backpointer_t {
  inode_backpointer_t() {}
  inode_backpointer_t(inodeno_t i, std::string_view d, version_t v) : dirino(i), dname(d), version(v) {}
  void encode(ceph::buffer::list& bl) const;
  void decode(ceph::buffer::list::const_iterator &bl);
  // Decoder for the older wire format (NOTE(review): exact vintage is
  // in the .cc, not visible here).
  void decode_old(ceph::buffer::list::const_iterator &bl);
  void dump(ceph::Formatter *f) const;
  static void generate_test_instances(std::list<inode_backpointer_t*>& ls);
  inodeno_t dirino; // containing directory ino
  std::string dname; // linking dentry name
  version_t version = 0; // child's version at time of backpointer creation
};
WRITE_CLASS_ENCODER(inode_backpointer_t)
// Backpointers are equal when every field matches.
inline bool operator==(const inode_backpointer_t& l, const inode_backpointer_t& r) {
  return l.dirino == r.dirino && l.dname == r.dname && l.version == r.version;
}

// Format: <dirino/dname vVERSION>
inline std::ostream& operator<<(std::ostream& out, const inode_backpointer_t& ib) {
  out << "<" << ib.dirino << "/" << ib.dname << " v" << ib.version << ">";
  return out;
}
/*
* inode_backtrace_t is a complete ancestor backtraces for a given inode.
* we include who _we_ are, so that the backtrace can stand alone (as, say,
* an xattr on an object).
*/
struct inode_backtrace_t {
  inode_backtrace_t() {}
  void encode(ceph::buffer::list& bl) const;
  void decode(ceph::buffer::list::const_iterator &bl);
  void dump(ceph::Formatter *f) const;
  static void generate_test_instances(std::list<inode_backtrace_t*>& ls);
  /**
   * Compare two backtraces *for the same inode*.
   * @pre The backtraces are for the same inode
   *
   * @param other The backtrace to compare ourselves with
   * @param equivalent A bool pointer which will be set to true if
   * the other backtrace is equivalent to our own (has the same dentries)
   * @param divergent A bool pointer which will be set to true if
   * the backtraces have differing entries without versions supporting them
   *
   * @returns 1 if we are newer than the other, 0 if equal, -1 if older
   */
  int compare(const inode_backtrace_t& other,
              bool *equivalent, bool *divergent) const;
  // Reset the variable-length parts; note this deliberately leaves
  // 'ino' and 'pool' untouched.
  void clear() {
    ancestors.clear();
    old_pools.clear();
  }
  inodeno_t ino; // my ino
  std::vector<inode_backpointer_t> ancestors;  // chain of parent links
  int64_t pool = -1;  // pool the backtrace lives in (-1 = unset; presumably the inode's data pool)
  std::vector<int64_t> old_pools;  // pools this backtrace was previously stored in (see compare/.cc)
};
WRITE_CLASS_ENCODER(inode_backtrace_t)
// Format: (pool)ino:ancestors//old_pools
inline std::ostream& operator<<(std::ostream& out, const inode_backtrace_t& it) {
  out << "(" << it.pool << ")" << it.ino << ":" << it.ancestors << "//" << it.old_pools;
  return out;
}

// Field-by-field equality; 'ancestors' relies on
// inode_backpointer_t::operator== above.
inline bool operator==(const inode_backtrace_t& l,
                       const inode_backtrace_t& r) {
  if (l.ino != r.ino)
    return false;
  if (l.pool != r.pool)
    return false;
  return l.old_pools == r.old_pools && l.ancestors == r.ancestors;
}
#endif
| 3,291 | 31.594059 | 104 | h |
null | ceph-main/src/mds/locks.h | #ifndef CEPH_MDS_LOCKS_H
#define CEPH_MDS_LOCKS_H
#include <stdbool.h>
/**
 * One row of an MDS lock state machine table, indexed by lock state
 * (the LOCK_* enum below).  The can_* fields hold the ANY/AUTH/XCL/REQ
 * role constants defined below, and the *_caps fields are client
 * capability bit masks (NOTE(review): exact semantics live in the
 * tables in locks.c -- confirm there).
 */
struct sm_state_t {
  int next;         // 0 if stable
  bool loner;       // may this state have a "loner" client?
  int replica_state;  // state replicas hold while auth is in this state
  char can_read;
  char can_read_projected;
  char can_rdlock;
  char can_wrlock;
  char can_force_wrlock;
  char can_lease;
  char can_xlock;
  int caps;          // caps issuable to any client
  int loner_caps;    // caps issuable to the loner client
  int xlocker_caps;  // caps issuable to the xlock holder
  int replica_caps;  // caps issuable by a replica
};

/**
 * A complete lock state machine: the per-state table plus a few
 * machine-wide properties.
 */
struct sm_t {
  const struct sm_state_t *states;  // table indexed by LOCK_* state
  int allowed_ever_auth;            // caps ever issuable on the auth MDS
  int allowed_ever_replica;         // caps ever issuable on a replica
  int careful;
  int can_remote_xlock;             // nonzero if non-auth may xlock remotely
};
#define ANY 1 // auth or replica
#define AUTH 2 // auth only
#define XCL 3 // auth or exclusive client
//#define FW 4 // fw to auth, if replica
#define REQ 5 // req state change from auth, if replica
extern const struct sm_t sm_simplelock;
extern const struct sm_t sm_filelock;
extern const struct sm_t sm_scatterlock;
extern const struct sm_t sm_locallock;
// -- lock states --
// sync <-> lock
enum {
LOCK_UNDEF = 0,
// auth rep
LOCK_SYNC, // AR R . RD L . / C . R RD L . / C .
LOCK_LOCK, // AR R . .. . X / . . . .. . . / . .
LOCK_PREXLOCK, // A . . .. . . / . . (lock)
LOCK_XLOCK, // A . . .. . . / . . (lock)
LOCK_XLOCKDONE, // A r p rd l x / . . (lock) <-- by same client only!!
LOCK_XLOCKSNAP, // also revoke Fb
LOCK_LOCK_XLOCK,
LOCK_SYNC_LOCK, // AR R . .. . . / . . R .. . . / . .
LOCK_LOCK_SYNC, // A R p rd l . / . . (lock) <-- lc by same client only
LOCK_EXCL, // A . . .. . . / c x * (lock)
LOCK_EXCL_SYNC, // A . . .. . . / c . * (lock)
LOCK_EXCL_LOCK, // A . . .. . . / . . (lock)
LOCK_SYNC_EXCL, // Ar R . .. . . / c . * (sync->lock)
LOCK_LOCK_EXCL, // A R . .. . . / . . (lock)
LOCK_REMOTEXLOCK, // on NON-auth
// * = loner mode
LOCK_MIX,
LOCK_SYNC_MIX,
LOCK_SYNC_MIX2,
LOCK_LOCK_MIX,
LOCK_EXCL_MIX,
LOCK_MIX_SYNC,
LOCK_MIX_SYNC2,
LOCK_MIX_LOCK,
LOCK_MIX_LOCK2,
LOCK_MIX_EXCL,
LOCK_TSYN,
LOCK_TSYN_LOCK,
LOCK_TSYN_MIX,
LOCK_LOCK_TSYN,
LOCK_MIX_TSYN,
LOCK_PRE_SCAN,
LOCK_SCAN,
LOCK_SNAP_SYNC,
LOCK_XSYN,
LOCK_XSYN_EXCL,
LOCK_EXCL_XSYN,
LOCK_XSYN_SYNC,
LOCK_XSYN_LOCK,
LOCK_XSYN_MIX,
LOCK_MAX,
};
// -------------------------
// lock actions
// for replicas
#define LOCK_AC_SYNC -1
#define LOCK_AC_MIX -2
#define LOCK_AC_LOCK -3
#define LOCK_AC_LOCKFLUSHED -4
// for auth
#define LOCK_AC_SYNCACK 1
#define LOCK_AC_MIXACK 2
#define LOCK_AC_LOCKACK 3
#define LOCK_AC_REQSCATTER 7
#define LOCK_AC_REQUNSCATTER 8
#define LOCK_AC_NUDGE 9
#define LOCK_AC_REQRDLOCK 10
#define LOCK_AC_FOR_REPLICA(a) ((a) < 0)
#define LOCK_AC_FOR_AUTH(a) ((a) > 0)
#endif
| 2,801 | 21.062992 | 82 | h |
null | ceph-main/src/mds/mds_table_types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDSTABLETYPES_H
#define CEPH_MDSTABLETYPES_H
// MDS TABLES
#include <string_view>
enum {
TABLE_ANCHOR,
TABLE_SNAP,
};
inline std::string_view get_mdstable_name(int t) {
switch (t) {
case TABLE_ANCHOR: return "anchortable";
case TABLE_SNAP: return "snaptable";
default: ceph_abort(); return std::string_view();
}
}
enum {
TABLESERVER_OP_QUERY = 1,
TABLESERVER_OP_QUERY_REPLY = -2,
TABLESERVER_OP_PREPARE = 3,
TABLESERVER_OP_AGREE = -4,
TABLESERVER_OP_COMMIT = 5,
TABLESERVER_OP_ACK = -6,
TABLESERVER_OP_ROLLBACK = 7,
TABLESERVER_OP_SERVER_UPDATE = 8,
TABLESERVER_OP_SERVER_READY = -9,
TABLESERVER_OP_NOTIFY_ACK = 10,
TABLESERVER_OP_NOTIFY_PREP = -11,
};
inline std::string_view get_mdstableserver_opname(int op) {
switch (op) {
case TABLESERVER_OP_QUERY: return "query";
case TABLESERVER_OP_QUERY_REPLY: return "query_reply";
case TABLESERVER_OP_PREPARE: return "prepare";
case TABLESERVER_OP_AGREE: return "agree";
case TABLESERVER_OP_COMMIT: return "commit";
case TABLESERVER_OP_ACK: return "ack";
case TABLESERVER_OP_ROLLBACK: return "rollback";
case TABLESERVER_OP_SERVER_UPDATE: return "server_update";
case TABLESERVER_OP_SERVER_READY: return "server_ready";
case TABLESERVER_OP_NOTIFY_ACK: return "notify_ack";
case TABLESERVER_OP_NOTIFY_PREP: return "notify_prep";
default: ceph_abort(); return std::string_view();
}
}
enum {
TABLE_OP_CREATE,
TABLE_OP_UPDATE,
TABLE_OP_DESTROY,
};
inline std::string_view get_mdstable_opname(int op) {
switch (op) {
case TABLE_OP_CREATE: return "create";
case TABLE_OP_UPDATE: return "update";
case TABLE_OP_DESTROY: return "destroy";
default: ceph_abort(); return std::string_view();
}
}
#endif
| 2,215 | 26.02439 | 71 | h |
null | ceph-main/src/mds/snap.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_SNAP_H
#define CEPH_MDS_SNAP_H
#include <map>
#include <string_view>
#include "mdstypes.h"
#include "common/snap_types.h"
#include "Capability.h"
/*
* generic snap descriptor.
*/
struct SnapInfo {
  void encode(ceph::buffer::list &bl) const;
  void decode(ceph::buffer::list::const_iterator &bl);
  void dump(ceph::Formatter *f) const;
  static void generate_test_instances(std::list<SnapInfo*>& ls);
  // Long-form name ("_$ino_$name"); built lazily and cached in
  // 'long_name' below.
  std::string_view get_long_name() const;
  snapid_t snapid;   // snapshot id
  inodeno_t ino;     // inode the snapshot was taken on
  utime_t stamp;     // creation time
  std::string name;  // user-visible snapshot name
  std::string alternate_name;  // optional alternate name (not part of operator==)
  mutable std::string long_name; ///< cached _$ino_$name
  std::map<std::string,std::string> metadata;  // arbitrary key/value snapshot metadata
};
WRITE_CLASS_ENCODER(SnapInfo)
// Equality considers only the identifying fields; alternate_name,
// metadata and the cached long_name are not compared (NOTE(review):
// presumably intentional -- confirm whether metadata should matter).
inline bool operator==(const SnapInfo &l, const SnapInfo &r)
{
  if (l.snapid != r.snapid)
    return false;
  if (l.ino != r.ino)
    return false;
  return l.stamp == r.stamp && l.name == r.name;
}
std::ostream& operator<<(std::ostream& out, const SnapInfo &sn);
/*
* SnapRealm - a subtree that shares the same set of snapshots.
*/
struct SnapRealm;
// A link to a (past) parent snap realm: 'ino' names the parent realm's
// inode, 'first' the first snapid for which the link applies.  The
// containing map in sr_t is keyed by "last" (NOTE(review): confirm
// exact semantics in the SnapRealm code).
struct snaplink_t {
  void encode(ceph::buffer::list &bl) const;
  void decode(ceph::buffer::list::const_iterator &bl);
  void dump(ceph::Formatter *f) const;
  static void generate_test_instances(std::list<snaplink_t*>& ls);
  inodeno_t ino;    // parent realm's inode number
  snapid_t first;   // first snapid this parent link covers
};
WRITE_CLASS_ENCODER(snaplink_t)
std::ostream& operator<<(std::ostream& out, const snaplink_t &l);
// carry data about a specific version of a SnapRealm
// Versioned data for a SnapRealm (see comment above): snapshot ids,
// the realm's snaps, and its chain of past parents.
struct sr_t {
  // Flag helpers for the 'flags' bitfield below.
  void mark_parent_global() { flags |= PARENT_GLOBAL; }
  void clear_parent_global() { flags &= ~PARENT_GLOBAL; }
  bool is_parent_global() const { return flags & PARENT_GLOBAL; }
  void mark_subvolume() { flags |= SUBVOLUME; }
  void clear_subvolume() { flags &= ~SUBVOLUME; }
  bool is_subvolume() const { return flags & SUBVOLUME; }
  void encode(ceph::buffer::list &bl) const;
  void decode(ceph::buffer::list::const_iterator &bl);
  void dump(ceph::Formatter *f) const;
  static void generate_test_instances(std::list<sr_t*>& ls);
  snapid_t seq = 0;                     // basically, a version/seq # for changes to _this_ realm.
  snapid_t created = 0;                 // when this realm was created.
  snapid_t last_created = 0;            // last snap created in _this_ realm.
  snapid_t last_destroyed = 0;          // seq for last removal
  snapid_t current_parent_since = 1;    // snapid since which the current parent applies (presumably; see SnapRealm)
  std::map<snapid_t, SnapInfo> snaps;   // snaps taken in this realm, keyed by snapid
  std::map<snapid_t, snaplink_t> past_parents;  // key is "last" (or NOSNAP)
  std::set<snapid_t> past_parent_snaps; // snapids inherited from past parents (presumably; see SnapRealm)
  utime_t last_modified;                // timestamp when this realm
                                        // was last changed.
  uint64_t change_attr = 0;             // tracks changes to snap
                                        // realm attrs.
  __u32 flags = 0;
  // Bit values for 'flags'; manipulated via the helpers above.
  enum {
    PARENT_GLOBAL = 1 << 0,
    SUBVOLUME = 1 << 1,
  };
};
WRITE_CLASS_ENCODER(sr_t)
class MDCache;
#endif
| 3,321 | 28.660714 | 98 | h |
null | ceph-main/src/mds/events/ECommitted.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_ECOMMITTED_H
#define CEPH_MDS_ECOMMITTED_H
#include "../LogEvent.h"
#include "EMetaBlob.h"
/**
 * Journal event recording that the request identified by 'reqid' has
 * been committed; presumably replay() retires any pending state for
 * that request -- see the implementation in the .cc.
 */
class ECommitted : public LogEvent {
public:
  metareqid_t reqid;  // id of the committed request
  ECommitted() : LogEvent(EVENT_COMMITTED) { }
  explicit ECommitted(metareqid_t r) :
    LogEvent(EVENT_COMMITTED), reqid(r) { }
  void print(std::ostream& out) const override {
    out << "ECommitted " << reqid;
  }
  void encode(bufferlist &bl, uint64_t features) const override;
  void decode(bufferlist::const_iterator &bl) override;
  void dump(Formatter *f) const override;
  static void generate_test_instances(std::list<ECommitted*>& ls);
  // No per-segment bookkeeping for this event.
  void update_segment() override {}
  void replay(MDSRank *mds) override;
};
WRITE_CLASS_ENCODER_FEATURES(ECommitted)
#endif
| 1,183 | 25.909091 | 71 | h |
null | ceph-main/src/mds/events/EExport.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_EEXPORT_H
#define CEPH_EEXPORT_H
#include "common/config.h"
#include "include/types.h"
#include "../MDSRank.h"
#include "EMetaBlob.h"
#include "../LogEvent.h"
/**
 * Journal event recording that the subtree rooted at dirfrag 'base'
 * (bounded by 'bounds') was exported to rank 'target'.
 */
class EExport : public LogEvent {
public:
  EMetaBlob metablob; // exported dir
protected:
  dirfrag_t base;               // root dirfrag of the exported subtree
  std::set<dirfrag_t> bounds;   // dirfrags bounding the subtree
  mds_rank_t target;            // rank the subtree was exported to
public:
  EExport() :
    LogEvent(EVENT_EXPORT), target(MDS_RANK_NONE) { }
  EExport(MDLog *mdlog, CDir *dir, mds_rank_t t) :
    LogEvent(EVENT_EXPORT),
    base(dir->dirfrag()), target(t) { }
  std::set<dirfrag_t> &get_bounds() { return bounds; }
  void print(std::ostream& out) const override {
    out << "EExport " << base << " to mds." << target << " " << metablob;
  }
  EMetaBlob *get_metablob() override { return &metablob; }
  void encode(bufferlist& bl, uint64_t features) const override;
  void decode(bufferlist::const_iterator &bl) override;
  void dump(Formatter *f) const override;
  static void generate_test_instances(std::list<EExport*>& ls);
  void replay(MDSRank *mds) override;
};
WRITE_CLASS_ENCODER_FEATURES(EExport)
#endif
| 1,532 | 24.983051 | 73 | h |
null | ceph-main/src/mds/events/EFragment.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_EFRAGMENT_H
#define CEPH_MDS_EFRAGMENT_H
#include "../LogEvent.h"
#include "EMetaBlob.h"
// Saved fnode state for a dirfrag, captured so a prepared fragment
// operation can be undone (see EFragment below).
struct dirfrag_rollback {
  CDir::fnode_const_ptr fnode;  // fnode snapshot to restore on rollback
  dirfrag_rollback() { }
  void encode(bufferlist& bl) const;
  void decode(bufferlist::const_iterator& bl);
};
WRITE_CLASS_ENCODER(dirfrag_rollback)
/**
 * Journal event for a dirfrag split/merge.  The operation is journaled
 * in phases (the OP_* enum below): PREPARE records the intent together
 * with rollback data, then COMMIT, ROLLBACK, or FINISH follows.
 */
class EFragment : public LogEvent {
public:
  EMetaBlob metablob;
  __u8 op{0};          // one of the OP_* phases below
  inodeno_t ino;       // inode whose dirfrags are being refragmented
  frag_t basefrag;     // base fragment being split or merged into
  __s32 bits{0}; // positive for split (from basefrag), negative for merge (to basefrag)
  frag_vec_t orig_frags;  // fragments as they were before the operation
  bufferlist rollback;    // encoded dirfrag_rollback entries (one per frag passed with rollback state)
  EFragment() : LogEvent(EVENT_FRAGMENT) { }
  EFragment(MDLog *mdlog, int o, dirfrag_t df, int b) :
    LogEvent(EVENT_FRAGMENT),
    op(o), ino(df.ino), basefrag(df.frag), bits(b) { }
  void print(std::ostream& out) const override {
    out << "EFragment " << op_name(op) << " " << ino << " " << basefrag << " by " << bits << " " << metablob;
  }
  enum {
    OP_PREPARE = 1,
    OP_COMMIT = 2,
    OP_ROLLBACK = 3,
    OP_FINISH = 4 // finish deleting orphan dirfrags
  };
  // Human-readable name for an OP_* value.
  static std::string_view op_name(int o) {
    switch (o) {
    case OP_PREPARE: return "prepare";
    case OP_COMMIT: return "commit";
    case OP_ROLLBACK: return "rollback";
    case OP_FINISH: return "finish";
    default: return "???";
    }
  }
  // Record a pre-operation fragment and (optionally) its rollback state.
  void add_orig_frag(frag_t df, dirfrag_rollback *drb=NULL) {
    using ceph::encode;
    orig_frags.push_back(df);
    if (drb)
      encode(*drb, rollback);
  }
  EMetaBlob *get_metablob() override { return &metablob; }
  void encode(bufferlist &bl, uint64_t features) const override;
  void decode(bufferlist::const_iterator &bl) override;
  void dump(Formatter *f) const override;
  static void generate_test_instances(std::list<EFragment*>& ls);
  void replay(MDSRank *mds) override;
};
WRITE_CLASS_ENCODER_FEATURES(EFragment)
#endif
| 2,279 | 26.804878 | 109 | h |
null | ceph-main/src/mds/events/EImportFinish.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_EIMPORTFINISH_H
#define CEPH_EIMPORTFINISH_H
#include "common/config.h"
#include "include/types.h"
#include "../MDSRank.h"
#include "../LogEvent.h"
/**
 * Journal event closing out an import: logged once the import of
 * dirfrag 'base' has either completed ('success') or been aborted.
 * Pairs with EImportStart.
 */
class EImportFinish : public LogEvent {
protected:
  dirfrag_t base; // imported dir
  bool success;   // true if the import completed, false if it failed
public:
  EImportFinish(CDir *dir, bool s) : LogEvent(EVENT_IMPORTFINISH),
                                     base(dir->dirfrag()),
                                     success(s) { }
  EImportFinish() : LogEvent(EVENT_IMPORTFINISH), base(), success(false) { }
  void print(std::ostream& out) const override {
    out << "EImportFinish " << base;
    if (success)
      out << " success";
    else
      out << " failed";
  }
  void encode(bufferlist& bl, uint64_t features) const override;
  void decode(bufferlist::const_iterator &bl) override;
  void dump(Formatter *f) const override;
  static void generate_test_instances(std::list<EImportFinish*>& ls);
  void replay(MDSRank *mds) override;
};
WRITE_CLASS_ENCODER_FEATURES(EImportFinish)
#endif
| 1,404 | 25.018519 | 76 | h |
null | ceph-main/src/mds/events/EImportStart.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_EIMPORTSTART_H
#define CEPH_EIMPORTSTART_H
#include "common/config.h"
#include "include/types.h"
class MDLog;
class MDSRank;
#include "EMetaBlob.h"
#include "../LogEvent.h"
class EImportStart : public LogEvent {
protected:
dirfrag_t base;
std::vector<dirfrag_t> bounds;
mds_rank_t from;
public:
EMetaBlob metablob;
bufferlist client_map; // encoded map<__u32,entity_inst_t>
version_t cmapv{0};
EImportStart(MDLog *log, dirfrag_t di, const std::vector<dirfrag_t>& b, mds_rank_t f) :
LogEvent(EVENT_IMPORTSTART),
base(di), bounds(b), from(f) { }
EImportStart() :
LogEvent(EVENT_IMPORTSTART), from(MDS_RANK_NONE) { }
void print(std::ostream& out) const override {
out << "EImportStart " << base << " from mds." << from << " " << metablob;
}
EMetaBlob *get_metablob() override { return &metablob; }
void encode(bufferlist &bl, uint64_t features) const override;
void decode(bufferlist::const_iterator &bl) override;
void dump(Formatter *f) const override;
static void generate_test_instances(std::list<EImportStart*>& ls);
void update_segment() override;
void replay(MDSRank *mds) override;
};
WRITE_CLASS_ENCODER_FEATURES(EImportStart)
#endif
| 1,651 | 25.645161 | 89 | h |
null | ceph-main/src/mds/events/ENoOp.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_ENOOP_H
#define CEPH_MDS_ENOOP_H
#include "../LogEvent.h"
class ENoOp : public LogEvent {
uint32_t pad_size;
public:
ENoOp() : LogEvent(EVENT_NOOP), pad_size(0) { }
explicit ENoOp(uint32_t size_) : LogEvent(EVENT_NOOP), pad_size(size_){ }
void encode(bufferlist& bl, uint64_t features) const override;
void decode(bufferlist::const_iterator& bl) override;
void dump(Formatter *f) const override {}
void replay(MDSRank *mds) override;
};
WRITE_CLASS_ENCODER_FEATURES(ENoOp)
#endif
| 946 | 25.305556 | 75 | h |
null | ceph-main/src/mds/events/EOpen.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_EOPEN_H
#define CEPH_MDS_EOPEN_H
#include "../LogEvent.h"
#include "EMetaBlob.h"
class EOpen : public LogEvent {
public:
EMetaBlob metablob;
std::vector<inodeno_t> inos;
std::vector<vinodeno_t> snap_inos;
EOpen() : LogEvent(EVENT_OPEN) { }
explicit EOpen(MDLog *mdlog) :
LogEvent(EVENT_OPEN) { }
void print(std::ostream& out) const override {
out << "EOpen " << metablob << ", " << inos.size() << " open files";
}
EMetaBlob *get_metablob() override { return &metablob; }
void add_clean_inode(CInode *in) {
if (!in->is_base()) {
metablob.add_dir_context(in->get_projected_parent_dn()->get_dir());
metablob.add_primary_dentry(in->get_projected_parent_dn(), 0, false);
if (in->last == CEPH_NOSNAP)
inos.push_back(in->ino());
else
snap_inos.push_back(in->vino());
}
}
void add_ino(inodeno_t ino) {
inos.push_back(ino);
}
void encode(bufferlist& bl, uint64_t features) const override;
void decode(bufferlist::const_iterator& bl) override;
void dump(Formatter *f) const override;
static void generate_test_instances(std::list<EOpen*>& ls);
void update_segment() override;
void replay(MDSRank *mds) override;
};
WRITE_CLASS_ENCODER_FEATURES(EOpen)
#endif
| 1,685 | 26.193548 | 75 | h |
null | ceph-main/src/mds/events/EPeerUpdate.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_EPEERUPDATE_H
#define CEPH_MDS_EPEERUPDATE_H
#include <string_view>
#include "../LogEvent.h"
#include "EMetaBlob.h"
/*
* rollback records, for remote/peer updates, which may need to be manually
* rolled back during journal replay. (or while active if leader fails, but in
* that case these records aren't needed.)
*/
struct link_rollback {
metareqid_t reqid;
inodeno_t ino;
bool was_inc;
utime_t old_ctime;
utime_t old_dir_mtime;
utime_t old_dir_rctime;
bufferlist snapbl;
link_rollback() : ino(0), was_inc(false) {}
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& bl);
void dump(Formatter *f) const;
static void generate_test_instances(std::list<link_rollback*>& ls);
};
WRITE_CLASS_ENCODER(link_rollback)
/*
* this is only used on an empty dir with a dirfrag on a remote node.
* we are auth for nothing. all we need to do is relink the directory
* in the hierarchy properly during replay to avoid breaking the
* subtree map.
*/
struct rmdir_rollback {
metareqid_t reqid;
dirfrag_t src_dir;
std::string src_dname;
dirfrag_t dest_dir;
std::string dest_dname;
bufferlist snapbl;
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& bl);
void dump(Formatter *f) const;
static void generate_test_instances(std::list<rmdir_rollback*>& ls);
};
WRITE_CLASS_ENCODER(rmdir_rollback)
struct rename_rollback {
struct drec {
dirfrag_t dirfrag;
utime_t dirfrag_old_mtime;
utime_t dirfrag_old_rctime;
inodeno_t ino, remote_ino;
std::string dname;
char remote_d_type;
utime_t old_ctime;
drec() : remote_d_type((char)S_IFREG) {}
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& bl);
void dump(Formatter *f) const;
static void generate_test_instances(std::list<drec*>& ls);
};
WRITE_CLASS_MEMBER_ENCODER(drec)
metareqid_t reqid;
drec orig_src, orig_dest;
drec stray; // we know this is null, but we want dname, old mtime/rctime
utime_t ctime;
bufferlist srci_snapbl;
bufferlist desti_snapbl;
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& bl);
void dump(Formatter *f) const;
static void generate_test_instances(std::list<rename_rollback*>& ls);
};
WRITE_CLASS_ENCODER(rename_rollback::drec)
WRITE_CLASS_ENCODER(rename_rollback)
class EPeerUpdate : public LogEvent {
public:
const static int OP_PREPARE = 1;
const static int OP_COMMIT = 2;
const static int OP_ROLLBACK = 3;
const static int LINK = 1;
const static int RENAME = 2;
const static int RMDIR = 3;
/*
* we journal a rollback metablob that contains the unmodified metadata
* too, because we may be updating previously dirty metadata, which
* will allow old log segments to be trimmed. if we end of rolling back,
* those updates could be lost.. so we re-journal the unmodified metadata,
* and replay will apply _either_ commit or rollback.
*/
EMetaBlob commit;
bufferlist rollback;
std::string type;
metareqid_t reqid;
mds_rank_t leader;
__u8 op; // prepare, commit, abort
__u8 origop; // link | rename
EPeerUpdate() : LogEvent(EVENT_PEERUPDATE), leader(0), op(0), origop(0) { }
EPeerUpdate(MDLog *mdlog, std::string_view s, metareqid_t ri, int leadermds, int o, int oo) :
LogEvent(EVENT_PEERUPDATE),
type(s),
reqid(ri),
leader(leadermds),
op(o), origop(oo) { }
void print(std::ostream& out) const override {
if (type.length())
out << type << " ";
out << " " << (int)op;
if (origop == LINK) out << " link";
if (origop == RENAME) out << " rename";
out << " " << reqid;
out << " for mds." << leader;
out << commit;
}
EMetaBlob *get_metablob() override { return &commit; }
void encode(bufferlist& bl, uint64_t features) const override;
void decode(bufferlist::const_iterator& bl) override;
void dump(Formatter *f) const override;
static void generate_test_instances(std::list<EPeerUpdate*>& ls);
void replay(MDSRank *mds) override;
};
WRITE_CLASS_ENCODER_FEATURES(EPeerUpdate)
#endif
| 4,541 | 27.746835 | 95 | h |
null | ceph-main/src/mds/events/EPurged.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_EPURGE_H
#define CEPH_MDS_EPURGE_H
#include "common/config.h"
#include "include/types.h"
#include "../LogEvent.h"
class EPurged : public LogEvent {
public:
EPurged() : LogEvent(EVENT_PURGED) { }
EPurged(const interval_set<inodeno_t>& _inos, LogSegment::seq_t _seq, version_t iv)
: LogEvent(EVENT_PURGED), inos(_inos), seq(_seq), inotablev(iv) {
}
void encode(bufferlist& bl, uint64_t features) const override;
void decode(bufferlist::const_iterator& bl) override;
void dump(Formatter *f) const override;
void print(std::ostream& out) const override {
out << "Eurged " << inos.size() << " inos, inotable v" << inotablev;
}
void update_segment() override;
void replay(MDSRank *mds) override;
protected:
interval_set<inodeno_t> inos;
LogSegment::seq_t seq;
version_t inotablev{0};
};
WRITE_CLASS_ENCODER_FEATURES(EPurged)
#endif // CEPH_MDS_EPURGE_H
| 1,330 | 27.319149 | 85 | h |
null | ceph-main/src/mds/events/EResetJournal.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_ERESETJOURNAL_H
#define CEPH_MDS_ERESETJOURNAL_H
#include "../LogEvent.h"
// generic log event
class EResetJournal : public LogEvent {
public:
EResetJournal() : LogEvent(EVENT_RESETJOURNAL) { }
~EResetJournal() override {}
void encode(bufferlist& bl, uint64_t features) const override;
void decode(bufferlist::const_iterator& bl) override;
void dump(Formatter *f) const override;
static void generate_test_instances(std::list<EResetJournal*>& ls);
void print(std::ostream& out) const override {
out << "EResetJournal";
}
void replay(MDSRank *mds) override;
};
WRITE_CLASS_ENCODER_FEATURES(EResetJournal)
#endif
| 1,086 | 26.175 | 71 | h |
null | ceph-main/src/mds/events/ESession.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_ESESSION_H
#define CEPH_MDS_ESESSION_H
#include "common/config.h"
#include "include/types.h"
#include "../LogEvent.h"
class ESession : public LogEvent {
protected:
entity_inst_t client_inst;
bool open; // open or close
version_t cmapv{0}; // client map version
interval_set<inodeno_t> inos_to_free;
version_t inotablev{0};
interval_set<inodeno_t> inos_to_purge;
// Client metadata stored during open
client_metadata_t client_metadata;
public:
ESession() : LogEvent(EVENT_SESSION), open(false) { }
ESession(const entity_inst_t& inst, bool o, version_t v,
const client_metadata_t& cm) :
LogEvent(EVENT_SESSION),
client_inst(inst), open(o), cmapv(v), inotablev(0),
client_metadata(cm) { }
ESession(const entity_inst_t& inst, bool o, version_t v,
const interval_set<inodeno_t>& to_free, version_t iv,
const interval_set<inodeno_t>& to_purge) :
LogEvent(EVENT_SESSION), client_inst(inst), open(o), cmapv(v),
inos_to_free(to_free), inotablev(iv), inos_to_purge(to_purge) {}
void encode(bufferlist& bl, uint64_t features) const override;
void decode(bufferlist::const_iterator& bl) override;
void dump(Formatter *f) const override;
static void generate_test_instances(std::list<ESession*>& ls);
void print(std::ostream& out) const override {
if (open)
out << "ESession " << client_inst << " open cmapv " << cmapv;
else
out << "ESession " << client_inst << " close cmapv " << cmapv;
if (inos_to_free.size() || inos_to_purge.size())
out << " (" << inos_to_free.size() << " to free, v" << inotablev
<< ", " << inos_to_purge.size() << " to purge)";
}
void update_segment() override;
void replay(MDSRank *mds) override;
entity_inst_t get_client_inst() const {return client_inst;}
};
WRITE_CLASS_ENCODER_FEATURES(ESession)
#endif
| 2,289 | 30.805556 | 71 | h |
null | ceph-main/src/mds/events/ESessions.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_ESESSIONS_H
#define CEPH_MDS_ESESSIONS_H
#include "common/config.h"
#include "include/types.h"
#include "../LogEvent.h"
class ESessions : public LogEvent {
protected:
version_t cmapv; // client map version
bool old_style_encode;
public:
std::map<client_t,entity_inst_t> client_map;
std::map<client_t,client_metadata_t> client_metadata_map;
ESessions() : LogEvent(EVENT_SESSIONS), cmapv(0), old_style_encode(false) { }
ESessions(version_t pv, std::map<client_t,entity_inst_t>&& cm,
std::map<client_t,client_metadata_t>&& cmm) :
LogEvent(EVENT_SESSIONS),
cmapv(pv), old_style_encode(false),
client_map(std::move(cm)),
client_metadata_map(std::move(cmm)) {}
void mark_old_encoding() { old_style_encode = true; }
void encode(bufferlist &bl, uint64_t features) const override;
void decode_old(bufferlist::const_iterator &bl);
void decode_new(bufferlist::const_iterator &bl);
void decode(bufferlist::const_iterator &bl) override {
if (old_style_encode) decode_old(bl);
else decode_new(bl);
}
void dump(Formatter *f) const override;
static void generate_test_instances(std::list<ESessions*>& ls);
void print(std::ostream& out) const override {
out << "ESessions " << client_map.size() << " opens cmapv " << cmapv;
}
void update_segment() override;
void replay(MDSRank *mds) override;
};
WRITE_CLASS_ENCODER_FEATURES(ESessions)
#endif
| 1,857 | 28.967742 | 79 | h |
null | ceph-main/src/mds/events/ESubtreeMap.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_ESUBTREEMAP_H
#define CEPH_MDS_ESUBTREEMAP_H
#include "../LogEvent.h"
#include "EMetaBlob.h"
class ESubtreeMap : public LogEvent {
public:
EMetaBlob metablob;
std::map<dirfrag_t, std::vector<dirfrag_t> > subtrees;
std::set<dirfrag_t> ambiguous_subtrees;
uint64_t expire_pos;
uint64_t event_seq;
ESubtreeMap() : LogEvent(EVENT_SUBTREEMAP), expire_pos(0), event_seq(0) { }
void print(std::ostream& out) const override {
out << "ESubtreeMap " << subtrees.size() << " subtrees "
<< ", " << ambiguous_subtrees.size() << " ambiguous "
<< metablob;
}
EMetaBlob *get_metablob() override { return &metablob; }
void encode(bufferlist& bl, uint64_t features) const override;
void decode(bufferlist::const_iterator& bl) override;
void dump(Formatter *f) const override;
static void generate_test_instances(std::list<ESubtreeMap*>& ls);
void replay(MDSRank *mds) override;
};
WRITE_CLASS_ENCODER_FEATURES(ESubtreeMap)
#endif
| 1,403 | 27.653061 | 77 | h |
null | ceph-main/src/mds/events/ETableClient.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_ETABLECLIENT_H
#define CEPH_MDS_ETABLECLIENT_H
#include "common/config.h"
#include "include/types.h"
#include "../mds_table_types.h"
#include "../LogEvent.h"
struct ETableClient : public LogEvent {
__u16 table;
__s16 op;
version_t tid;
ETableClient() : LogEvent(EVENT_TABLECLIENT), table(0), op(0), tid(0) { }
ETableClient(int t, int o, version_t ti) :
LogEvent(EVENT_TABLECLIENT),
table(t), op(o), tid(ti) { }
void encode(bufferlist& bl, uint64_t features) const override;
void decode(bufferlist::const_iterator& bl) override;
void dump(Formatter *f) const override;
static void generate_test_instances(std::list<ETableClient*>& ls);
void print(std::ostream& out) const override {
out << "ETableClient " << get_mdstable_name(table) << " " << get_mdstableserver_opname(op);
if (tid) out << " tid " << tid;
}
//void update_segment();
void replay(MDSRank *mds) override;
};
WRITE_CLASS_ENCODER_FEATURES(ETableClient)
#endif
| 1,422 | 27.46 | 95 | h |
null | ceph-main/src/mds/events/ETableServer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_ETABLESERVER_H
#define CEPH_MDS_ETABLESERVER_H
#include "common/config.h"
#include "include/types.h"
#include "../mds_table_types.h"
#include "../LogEvent.h"
struct ETableServer : public LogEvent {
__u16 table;
__s16 op;
uint64_t reqid;
mds_rank_t bymds;
bufferlist mutation;
version_t tid;
version_t version;
ETableServer() : LogEvent(EVENT_TABLESERVER), table(0), op(0),
reqid(0), bymds(MDS_RANK_NONE), tid(0), version(0) { }
ETableServer(int t, int o, uint64_t ri, mds_rank_t m, version_t ti, version_t v) :
LogEvent(EVENT_TABLESERVER),
table(t), op(o), reqid(ri), bymds(m), tid(ti), version(v) { }
void encode(bufferlist& bl, uint64_t features) const override;
void decode(bufferlist::const_iterator& bl) override;
void dump(Formatter *f) const override;
static void generate_test_instances(std::list<ETableServer*>& ls);
void print(std::ostream& out) const override {
out << "ETableServer " << get_mdstable_name(table)
<< " " << get_mdstableserver_opname(op);
if (reqid) out << " reqid " << reqid;
if (bymds >= 0) out << " mds." << bymds;
if (tid) out << " tid " << tid;
if (version) out << " version " << version;
if (mutation.length()) out << " mutation=" << mutation.length() << " bytes";
}
void update_segment() override;
void replay(MDSRank *mds) override;
};
WRITE_CLASS_ENCODER_FEATURES(ETableServer)
#endif
| 1,851 | 29.866667 | 84 | h |
null | ceph-main/src/mds/events/EUpdate.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDS_EUPDATE_H
#define CEPH_MDS_EUPDATE_H
#include <string_view>
#include "../LogEvent.h"
#include "EMetaBlob.h"
class EUpdate : public LogEvent {
public:
EMetaBlob metablob;
std::string type;
bufferlist client_map;
version_t cmapv;
metareqid_t reqid;
bool had_peers;
EUpdate() : LogEvent(EVENT_UPDATE), cmapv(0), had_peers(false) { }
EUpdate(MDLog *mdlog, std::string_view s) :
LogEvent(EVENT_UPDATE),
type(s), cmapv(0), had_peers(false) { }
void print(std::ostream& out) const override {
if (type.length())
out << "EUpdate " << type << " ";
out << metablob;
}
EMetaBlob *get_metablob() override { return &metablob; }
void encode(bufferlist& bl, uint64_t features) const override;
void decode(bufferlist::const_iterator& bl) override;
void dump(Formatter *f) const override;
static void generate_test_instances(std::list<EUpdate*>& ls);
void update_segment() override;
void replay(MDSRank *mds) override;
};
WRITE_CLASS_ENCODER_FEATURES(EUpdate)
#endif
| 1,462 | 25.125 | 71 | h |
null | ceph-main/src/messages/MAuth.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MAUTH_H
#define CEPH_MAUTH_H
#include <string_view>
#include "include/encoding.h"
#include "msg/Message.h"
#include "msg/MessageRef.h"
#include "messages/PaxosServiceMessage.h"
class MAuth final : public PaxosServiceMessage {
public:
__u32 protocol;
ceph::buffer::list auth_payload;
epoch_t monmap_epoch;
/* if protocol == 0, then auth_payload is a set<__u32> listing protocols the client supports */
MAuth() : PaxosServiceMessage{CEPH_MSG_AUTH, 0}, protocol(0), monmap_epoch(0) { }
private:
~MAuth() final {}
public:
std::string_view get_type_name() const override { return "auth"; }
void print(std::ostream& out) const override {
out << "auth(proto " << protocol << " " << auth_payload.length() << " bytes"
<< " epoch " << monmap_epoch << ")";
}
void decode_payload() override {
using ceph::decode;
auto p = payload.cbegin();
paxos_decode(p);
decode(protocol, p);
decode(auth_payload, p);
if (!p.end())
decode(monmap_epoch, p);
else
monmap_epoch = 0;
}
void encode_payload(uint64_t features) override {
using ceph::encode;
paxos_encode();
encode(protocol, payload);
encode(auth_payload, payload);
encode(monmap_epoch, payload);
}
ceph::buffer::list& get_auth_payload() { return auth_payload; }
};
#endif
| 1,750 | 25.134328 | 97 | h |
null | ceph-main/src/messages/MAuthReply.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MAUTHREPLY_H
#define CEPH_MAUTHREPLY_H
#include "msg/Message.h"
#include "common/errno.h"
class MAuthReply final : public Message {
public:
__u32 protocol;
errorcode32_t result;
uint64_t global_id; // if zero, meaningless
std::string result_msg;
ceph::buffer::list result_bl;
MAuthReply() : Message(CEPH_MSG_AUTH_REPLY), protocol(0), result(0), global_id(0) {}
MAuthReply(__u32 p, ceph::buffer::list *bl = NULL, int r = 0, uint64_t gid=0, const char *msg = "") :
Message(CEPH_MSG_AUTH_REPLY),
protocol(p), result(r), global_id(gid),
result_msg(msg) {
if (bl)
result_bl = *bl;
}
private:
~MAuthReply() final {}
public:
std::string_view get_type_name() const override { return "auth_reply"; }
void print(std::ostream& o) const override {
o << "auth_reply(proto " << protocol << " " << result << " " << cpp_strerror(result);
if (result_msg.length())
o << ": " << result_msg;
o << ")";
}
void decode_payload() override {
using ceph::decode;
auto p = payload.cbegin();
decode(protocol, p);
decode(result, p);
decode(global_id, p);
decode(result_bl, p);
decode(result_msg, p);
}
void encode_payload(uint64_t features) override {
using ceph::encode;
encode(protocol, payload);
encode(result, payload);
encode(global_id, payload);
encode(result_bl, payload);
encode(result_msg, payload);
}
};
#endif
| 1,869 | 26.101449 | 103 | h |
null | ceph-main/src/messages/MBackfillReserve.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MBACKFILL_H
#define CEPH_MBACKFILL_H
#include "msg/Message.h"
#include "messages/MOSDPeeringOp.h"
#include "osd/PGPeeringEvent.h"
class MBackfillReserve : public MOSDPeeringOp {
private:
static constexpr int HEAD_VERSION = 5;
static constexpr int COMPAT_VERSION = 4;
public:
spg_t pgid;
epoch_t query_epoch;
enum {
REQUEST = 0, // primary->replica: please reserve a slot
GRANT = 1, // replica->primary: ok, i reserved it
REJECT_TOOFULL = 2, // replica->primary: too full, sorry, try again later (*)
RELEASE = 3, // primary->replcia: release the slot i reserved before
REVOKE_TOOFULL = 4, // replica->primary: too full, stop backfilling
REVOKE = 5, // replica->primary: i'm taking back the slot i gave you
// (*) NOTE: prior to luminous, REJECT was overloaded to also mean release
};
uint32_t type;
uint32_t priority;
int64_t primary_num_bytes;
int64_t shard_num_bytes;
spg_t get_spg() const {
return pgid;
}
epoch_t get_map_epoch() const {
return query_epoch;
}
epoch_t get_min_epoch() const {
return query_epoch;
}
PGPeeringEvent *get_event() override {
switch (type) {
case REQUEST:
return new PGPeeringEvent(
query_epoch,
query_epoch,
RequestBackfillPrio(priority, primary_num_bytes, shard_num_bytes));
case GRANT:
return new PGPeeringEvent(
query_epoch,
query_epoch,
RemoteBackfillReserved());
case REJECT_TOOFULL:
// NOTE: this is replica -> primary "i reject your request"
// and also primary -> replica "cancel my previously-granted request"
// (for older peers)
// and also replica -> primary "i revoke your reservation"
// (for older peers)
return new PGPeeringEvent(
query_epoch,
query_epoch,
RemoteReservationRejectedTooFull());
case RELEASE:
return new PGPeeringEvent(
query_epoch,
query_epoch,
RemoteReservationCanceled());
case REVOKE_TOOFULL:
return new PGPeeringEvent(
query_epoch,
query_epoch,
RemoteReservationRevokedTooFull());
case REVOKE:
return new PGPeeringEvent(
query_epoch,
query_epoch,
RemoteReservationRevoked());
default:
ceph_abort();
}
}
MBackfillReserve()
: MOSDPeeringOp{MSG_OSD_BACKFILL_RESERVE, HEAD_VERSION, COMPAT_VERSION},
query_epoch(0), type(-1), priority(-1), primary_num_bytes(0),
shard_num_bytes(0) {}
MBackfillReserve(int type,
spg_t pgid,
epoch_t query_epoch, unsigned prio = -1,
int64_t primary_num_bytes = 0,
int64_t shard_num_bytes = 0)
: MOSDPeeringOp{MSG_OSD_BACKFILL_RESERVE, HEAD_VERSION, COMPAT_VERSION},
pgid(pgid), query_epoch(query_epoch),
type(type), priority(prio), primary_num_bytes(primary_num_bytes),
shard_num_bytes(shard_num_bytes) {}
std::string_view get_type_name() const override {
return "MBackfillReserve";
}
void inner_print(std::ostream& out) const override {
switch (type) {
case REQUEST:
out << "REQUEST";
break;
case GRANT:
out << "GRANT";
break;
case REJECT_TOOFULL:
out << "REJECT_TOOFULL";
break;
case RELEASE:
out << "RELEASE";
break;
case REVOKE_TOOFULL:
out << "REVOKE_TOOFULL";
break;
case REVOKE:
out << "REVOKE";
break;
}
if (type == REQUEST) out << " prio: " << priority;
return;
}
void decode_payload() override {
auto p = payload.cbegin();
using ceph::decode;
decode(pgid.pgid, p);
decode(query_epoch, p);
decode(type, p);
decode(priority, p);
decode(pgid.shard, p);
if (header.version >= 5) {
decode(primary_num_bytes, p);
decode(shard_num_bytes, p);
} else {
primary_num_bytes = 0;
shard_num_bytes = 0;
}
}
void encode_payload(uint64_t features) override {
using ceph::encode;
if (!HAVE_FEATURE(features, RECOVERY_RESERVATION_2)) {
header.version = 3;
header.compat_version = 3;
encode(pgid.pgid, payload);
encode(query_epoch, payload);
encode((type == RELEASE || type == REVOKE_TOOFULL || type == REVOKE) ?
REJECT_TOOFULL : type, payload);
encode(priority, payload);
encode(pgid.shard, payload);
return;
}
header.version = HEAD_VERSION;
header.compat_version = COMPAT_VERSION;
encode(pgid.pgid, payload);
encode(query_epoch, payload);
encode(type, payload);
encode(priority, payload);
encode(pgid.shard, payload);
encode(primary_num_bytes, payload);
encode(shard_num_bytes, payload);
}
};
#endif
| 5,103 | 27.198895 | 84 | h |
null | ceph-main/src/messages/MCacheExpire.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MCACHEEXPIRE_H
#define CEPH_MCACHEEXPIRE_H
#include <string_view>
#include "mds/mdstypes.h"
#include "messages/MMDSOp.h"
class MCacheExpire final : public MMDSOp {
private:
__s32 from;
public:
/*
group things by realm (auth delgation root), since that's how auth is determined.
that makes it less work to process when exports are in progress.
*/
struct realm {
std::map<vinodeno_t, uint32_t> inodes;
std::map<dirfrag_t, uint32_t> dirs;
std::map<dirfrag_t, std::map<std::pair<std::string,snapid_t>,uint32_t> > dentries;
void merge(const realm& o) {
inodes.insert(o.inodes.begin(), o.inodes.end());
dirs.insert(o.dirs.begin(), o.dirs.end());
for (const auto &p : o.dentries) {
auto em = dentries.emplace(std::piecewise_construct, std::forward_as_tuple(p.first), std::forward_as_tuple(p.second));
if (!em.second) {
em.first->second.insert(p.second.begin(), p.second.end());
}
}
}
void encode(ceph::buffer::list &bl) const {
using ceph::encode;
encode(inodes, bl);
encode(dirs, bl);
encode(dentries, bl);
}
void decode(ceph::buffer::list::const_iterator &bl) {
using ceph::decode;
decode(inodes, bl);
decode(dirs, bl);
decode(dentries, bl);
}
};
WRITE_CLASS_ENCODER(realm)
std::map<dirfrag_t, realm> realms;
int get_from() const { return from; }
protected:
MCacheExpire() : MMDSOp{MSG_MDS_CACHEEXPIRE}, from(-1) {}
MCacheExpire(int f) :
MMDSOp{MSG_MDS_CACHEEXPIRE},
from(f) { }
~MCacheExpire() final {}
public:
std::string_view get_type_name() const override { return "cache_expire";}
void add_inode(dirfrag_t r, vinodeno_t vino, unsigned nonce) {
realms[r].inodes[vino] = nonce;
}
void add_dir(dirfrag_t r, dirfrag_t df, unsigned nonce) {
realms[r].dirs[df] = nonce;
}
void add_dentry(dirfrag_t r, dirfrag_t df, std::string_view dn, snapid_t last, unsigned nonce) {
realms[r].dentries[df][std::pair<std::string,snapid_t>(dn,last)] = nonce;
}
void add_realm(dirfrag_t df, const realm& r) {
auto em = realms.emplace(std::piecewise_construct, std::forward_as_tuple(df), std::forward_as_tuple(r));
if (!em.second) {
em.first->second.merge(r);
}
}
void decode_payload() override {
using ceph::decode;
auto p = payload.cbegin();
decode(from, p);
decode(realms, p);
}
void encode_payload(uint64_t features) override {
using ceph::encode;
encode(from, payload);
encode(realms, payload);
}
private:
template<class T, typename... Args>
friend boost::intrusive_ptr<T> ceph::make_message(Args&&... args);
template<class T, typename... Args>
friend MURef<T> crimson::make_message(Args&&... args);
};
WRITE_CLASS_ENCODER(MCacheExpire::realm)
#endif
| 3,254 | 27.304348 | 126 | h |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.