repo (string, 1-152 chars, nullable) | file (string, 14-221 chars) | code (string, 501-25k chars) | file_length (int64, 501-25k) | avg_line_length (float64, 20-99.5) | max_line_length (int64, 21-134) | extension_type (string, 2 classes)
---|---|---|---|---|---|---
null | ceph-main/src/rgw/services/svc_bucket_sobj.h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "rgw_service.h"
#include "svc_meta_be.h"
#include "svc_bucket_types.h"
#include "svc_bucket.h"
#include "svc_bucket_sync.h"
class RGWSI_Zone;
class RGWSI_SysObj;
class RGWSI_SysObj_Cache;
class RGWSI_Meta;
class RGWSI_SyncModules;
struct rgw_cache_entry_info;
template <class T>
class RGWChainedCacheImpl;
class RGWSI_Bucket_SObj : public RGWSI_Bucket
{
struct bucket_info_cache_entry {
RGWBucketInfo info;
real_time mtime;
std::map<std::string, bufferlist> attrs;
};
using RGWChainedCacheImpl_bucket_info_cache_entry = RGWChainedCacheImpl<bucket_info_cache_entry>;
std::unique_ptr<RGWChainedCacheImpl_bucket_info_cache_entry> binfo_cache;
RGWSI_Bucket_BE_Handler ep_be_handler;
std::unique_ptr<RGWSI_MetaBackend::Module> ep_be_module;
RGWSI_BucketInstance_BE_Handler bi_be_handler;
std::unique_ptr<RGWSI_MetaBackend::Module> bi_be_module;
int do_start(optional_yield, const DoutPrefixProvider *dpp) override;
int do_read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx,
const std::string& key,
RGWBucketInfo *info,
real_time *pmtime,
std::map<std::string, bufferlist> *pattrs,
rgw_cache_entry_info *cache_info,
boost::optional<obj_version> refresh_version,
optional_yield y,
const DoutPrefixProvider *dpp);
int read_bucket_stats(const RGWBucketInfo& bucket_info,
RGWBucketEnt *ent,
optional_yield y,
const DoutPrefixProvider *dpp);
public:
struct Svc {
RGWSI_Bucket_SObj *bucket{nullptr};
RGWSI_BucketIndex *bi{nullptr};
RGWSI_Zone *zone{nullptr};
RGWSI_SysObj *sysobj{nullptr};
RGWSI_SysObj_Cache *cache{nullptr};
RGWSI_Meta *meta{nullptr};
RGWSI_MetaBackend *meta_be{nullptr};
RGWSI_SyncModules *sync_modules{nullptr};
RGWSI_Bucket_Sync *bucket_sync{nullptr};
} svc;
RGWSI_Bucket_SObj(CephContext *cct);
~RGWSI_Bucket_SObj();
RGWSI_Bucket_BE_Handler& get_ep_be_handler() override {
return ep_be_handler;
}
RGWSI_BucketInstance_BE_Handler& get_bi_be_handler() override {
return bi_be_handler;
}
void init(RGWSI_Zone *_zone_svc,
RGWSI_SysObj *_sysobj_svc,
RGWSI_SysObj_Cache *_cache_svc,
RGWSI_BucketIndex *_bi,
RGWSI_Meta *_meta_svc,
RGWSI_MetaBackend *_meta_be_svc,
RGWSI_SyncModules *_sync_modules_svc,
RGWSI_Bucket_Sync *_bucket_sync_svc);
int read_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx,
const std::string& key,
RGWBucketEntryPoint *entry_point,
RGWObjVersionTracker *objv_tracker,
real_time *pmtime,
std::map<std::string, bufferlist> *pattrs,
optional_yield y,
const DoutPrefixProvider *dpp,
rgw_cache_entry_info *cache_info = nullptr,
boost::optional<obj_version> refresh_version = boost::none) override;
int store_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx,
const std::string& key,
RGWBucketEntryPoint& info,
bool exclusive,
real_time mtime,
std::map<std::string, bufferlist> *pattrs,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp) override;
int remove_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx,
const std::string& key,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp) override;
int read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx,
const std::string& key,
RGWBucketInfo *info,
real_time *pmtime,
std::map<std::string, bufferlist> *pattrs,
optional_yield y,
const DoutPrefixProvider *dpp,
rgw_cache_entry_info *cache_info = nullptr,
boost::optional<obj_version> refresh_version = boost::none) override;
int read_bucket_info(RGWSI_Bucket_X_Ctx& ep_ctx,
const rgw_bucket& bucket,
RGWBucketInfo *info,
real_time *pmtime,
std::map<std::string, bufferlist> *pattrs,
boost::optional<obj_version> refresh_version,
optional_yield y,
const DoutPrefixProvider *dpp) override;
int store_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx,
const std::string& key,
RGWBucketInfo& info,
std::optional<RGWBucketInfo *> orig_info, /* nullopt: orig_info was not fetched,
nullptr: orig_info was not found (new bucket instance) */
bool exclusive,
real_time mtime,
std::map<std::string, bufferlist> *pattrs,
optional_yield y,
const DoutPrefixProvider *dpp) override;
int remove_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx,
const std::string& key,
const RGWBucketInfo& bucket_info,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp) override;
int read_bucket_stats(RGWSI_Bucket_X_Ctx& ctx,
const rgw_bucket& bucket,
RGWBucketEnt *ent,
optional_yield y,
const DoutPrefixProvider *dpp) override;
int read_buckets_stats(RGWSI_Bucket_X_Ctx& ctx,
std::map<std::string, RGWBucketEnt>& m,
optional_yield y,
const DoutPrefixProvider *dpp) override;
};
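/*
 * Editorial sketch, not part of the original Ceph header: one plausible call
 * into this service, assuming an already-initialized RGWSI_Bucket_SObj and a
 * populated RGWSI_Bucket_X_Ctx (both normally wired up by the service stack).
 * The helper name is illustrative and error handling is left to the caller.
 */
static inline int example_read_bucket(RGWSI_Bucket_SObj& svc_bucket,
                                      RGWSI_Bucket_X_Ctx& ctx,
                                      const rgw_bucket& bucket,
                                      RGWBucketInfo *info,
                                      optional_yield y,
                                      const DoutPrefixProvider *dpp)
{
  // pmtime/pattrs are optional outputs and no refresh version is requested here.
  return svc_bucket.read_bucket_info(ctx, bucket, info,
                                     nullptr, nullptr,
                                     boost::none, y, dpp);
}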
| 7,278 | 39.21547 | 134 | h |
null | ceph-main/src/rgw/services/svc_bucket_sync.h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "rgw_service.h"
#include "svc_bucket_types.h"
class RGWBucketSyncPolicyHandler;
using RGWBucketSyncPolicyHandlerRef = std::shared_ptr<RGWBucketSyncPolicyHandler>;
class RGWSI_Bucket_Sync : public RGWServiceInstance
{
public:
RGWSI_Bucket_Sync(CephContext *cct) : RGWServiceInstance(cct) {}
virtual int get_policy_handler(RGWSI_Bucket_X_Ctx& ctx,
std::optional<rgw_zone_id> zone,
std::optional<rgw_bucket> bucket,
RGWBucketSyncPolicyHandlerRef *handler,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
virtual int handle_bi_update(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
RGWBucketInfo *orig_bucket_info,
optional_yield y) = 0;
virtual int handle_bi_removal(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
optional_yield y) = 0;
virtual int get_bucket_sync_hints(const DoutPrefixProvider *dpp,
const rgw_bucket& bucket,
std::set<rgw_bucket> *sources,
std::set<rgw_bucket> *dests,
optional_yield y) = 0;
};
| 1,863 | 32.285714 | 82 | h |
null | ceph-main/src/rgw/services/svc_bucket_sync_sobj.h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "rgw_service.h"
#include "svc_meta_be.h"
#include "svc_bucket_sync.h"
class RGWSI_Zone;
class RGWSI_SysObj_Cache;
class RGWSI_Bucket_SObj;
template <class T>
class RGWChainedCacheImpl;
class RGWSI_Bucket_Sync_SObj_HintIndexManager;
struct rgw_sync_bucket_entity;
class RGWSI_Bucket_Sync_SObj : public RGWSI_Bucket_Sync
{
struct bucket_sync_policy_cache_entry {
std::shared_ptr<RGWBucketSyncPolicyHandler> handler;
};
std::unique_ptr<RGWChainedCacheImpl<bucket_sync_policy_cache_entry> > sync_policy_cache;
std::unique_ptr<RGWSI_Bucket_Sync_SObj_HintIndexManager> hint_index_mgr;
int do_start(optional_yield, const DoutPrefixProvider *dpp) override;
struct optional_zone_bucket {
std::optional<rgw_zone_id> zone;
std::optional<rgw_bucket> bucket;
optional_zone_bucket(const std::optional<rgw_zone_id>& _zone,
const std::optional<rgw_bucket>& _bucket) : zone(_zone), bucket(_bucket) {}
bool operator<(const optional_zone_bucket& ozb) const {
if (zone < ozb.zone) {
return true;
}
if (zone > ozb.zone) {
return false;
}
return bucket < ozb.bucket;
}
};
void get_hint_entities(RGWSI_Bucket_X_Ctx& ctx,
const std::set<rgw_zone_id>& zone_names,
const std::set<rgw_bucket>& buckets,
std::set<rgw_sync_bucket_entity> *hint_entities,
optional_yield y, const DoutPrefixProvider *);
int resolve_policy_hints(RGWSI_Bucket_X_Ctx& ctx,
rgw_sync_bucket_entity& self_entity,
RGWBucketSyncPolicyHandlerRef& handler,
RGWBucketSyncPolicyHandlerRef& zone_policy_handler,
std::map<optional_zone_bucket, RGWBucketSyncPolicyHandlerRef>& temp_map,
optional_yield y,
const DoutPrefixProvider *dpp);
int do_get_policy_handler(RGWSI_Bucket_X_Ctx& ctx,
std::optional<rgw_zone_id> zone,
std::optional<rgw_bucket> _bucket,
std::map<optional_zone_bucket, RGWBucketSyncPolicyHandlerRef>& temp_map,
RGWBucketSyncPolicyHandlerRef *handler,
optional_yield y,
const DoutPrefixProvider *dpp);
public:
struct Svc {
RGWSI_Zone *zone{nullptr};
RGWSI_SysObj *sysobj{nullptr};
RGWSI_SysObj_Cache *cache{nullptr};
RGWSI_Bucket_SObj *bucket_sobj{nullptr};
} svc;
RGWSI_Bucket_Sync_SObj(CephContext *cct);
~RGWSI_Bucket_Sync_SObj();
void init(RGWSI_Zone *_zone_svc,
RGWSI_SysObj *_sysobj_svc,
RGWSI_SysObj_Cache *_cache_svc,
RGWSI_Bucket_SObj *_bucket_sobj_svc);
int get_policy_handler(RGWSI_Bucket_X_Ctx& ctx,
std::optional<rgw_zone_id> zone,
std::optional<rgw_bucket> bucket,
RGWBucketSyncPolicyHandlerRef *handler,
optional_yield y,
const DoutPrefixProvider *dpp) override;
int handle_bi_update(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
RGWBucketInfo *orig_bucket_info,
optional_yield y) override;
int handle_bi_removal(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
optional_yield y) override;
int get_bucket_sync_hints(const DoutPrefixProvider *dpp,
const rgw_bucket& bucket,
std::set<rgw_bucket> *sources,
std::set<rgw_bucket> *dests,
optional_yield y) override;
};
| 4,274 | 33.475806 | 100 | h |
null | ceph-main/src/rgw/services/svc_bucket_types.h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "common/ptr_wrapper.h"
#include "svc_meta_be.h"
#include "svc_meta_be_types.h"
class RGWSI_MetaBackend_Handler;
using RGWSI_Bucket_BE_Handler = ptr_wrapper<RGWSI_MetaBackend_Handler, RGWSI_META_BE_TYPES::BUCKET>;
using RGWSI_BucketInstance_BE_Handler = ptr_wrapper<RGWSI_MetaBackend_Handler, RGWSI_META_BE_TYPES::BI>;
using RGWSI_Bucket_EP_Ctx = ptr_wrapper<RGWSI_MetaBackend::Context, RGWSI_META_BE_TYPES::BUCKET>;
using RGWSI_Bucket_BI_Ctx = ptr_wrapper<RGWSI_MetaBackend::Context, RGWSI_META_BE_TYPES::BI>;
struct RGWSI_Bucket_X_Ctx {
RGWSI_Bucket_EP_Ctx ep;
RGWSI_Bucket_BI_Ctx bi;
};
| 1,037 | 25.615385 | 104 | h |
null | ceph-main/src/rgw/services/svc_config_key.h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "rgw_service.h"
class RGWSI_ConfigKey : public RGWServiceInstance
{
public:
RGWSI_ConfigKey(CephContext *cct) : RGWServiceInstance(cct) {}
virtual ~RGWSI_ConfigKey() {}
virtual int get(const std::string& key, bool secure, bufferlist *result) = 0;
};
| 697 | 20.8125 | 79 | h |
null | ceph-main/src/rgw/services/svc_meta.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "svc_meta_be.h"
#include "rgw_service.h"
class RGWMetadataLog;
class RGWCoroutine;
class RGWSI_Meta : public RGWServiceInstance
{
RGWSI_SysObj *sysobj_svc{nullptr};
RGWSI_MDLog *mdlog_svc{nullptr};
std::map<RGWSI_MetaBackend::Type, RGWSI_MetaBackend *> be_svc;
std::vector<std::unique_ptr<RGWSI_MetaBackend_Handler> > be_handlers;
public:
RGWSI_Meta(CephContext *cct);
~RGWSI_Meta();
void init(RGWSI_SysObj *_sysobj_svc,
RGWSI_MDLog *_mdlog_svc,
std::vector<RGWSI_MetaBackend *>& _be_svc);
int create_be_handler(RGWSI_MetaBackend::Type be_type,
RGWSI_MetaBackend_Handler **phandler);
};
| 1,098 | 21.428571 | 71 | h |
null | ceph-main/src/rgw/services/svc_meta_be.h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "svc_meta_be_params.h"
#include "rgw_service.h"
#include "rgw_mdlog_types.h"
class RGWMetadataLogData;
class RGWSI_MDLog;
class RGWSI_Meta;
class RGWObjVersionTracker;
class RGWSI_MetaBackend_Handler;
class RGWSI_MetaBackend : public RGWServiceInstance
{
friend class RGWSI_Meta;
public:
class Module;
class Context;
protected:
RGWSI_MDLog *mdlog_svc{nullptr};
void base_init(RGWSI_MDLog *_mdlog_svc) {
mdlog_svc = _mdlog_svc;
}
int prepare_mutate(RGWSI_MetaBackend::Context *ctx,
const std::string& key,
const ceph::real_time& mtime,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp);
virtual int do_mutate(Context *ctx,
const std::string& key,
const ceph::real_time& mtime, RGWObjVersionTracker *objv_tracker,
RGWMDLogStatus op_type,
optional_yield y,
std::function<int()> f,
bool generic_prepare,
const DoutPrefixProvider *dpp);
virtual int pre_modify(const DoutPrefixProvider *dpp,
Context *ctx,
const std::string& key,
RGWMetadataLogData& log_data,
RGWObjVersionTracker *objv_tracker,
RGWMDLogStatus op_type,
optional_yield y);
virtual int post_modify(const DoutPrefixProvider *dpp,
Context *ctx,
const std::string& key,
RGWMetadataLogData& log_data,
RGWObjVersionTracker *objv_tracker, int ret,
optional_yield y);
public:
class Module {
/*
* Backend specialization module
*/
public:
virtual ~Module() = 0;
};
using ModuleRef = std::shared_ptr<Module>;
struct Context { /*
* A single metadata operation context. Will be holding info about
* backend and operation itself; operation might span multiple backend
* calls.
*/
virtual ~Context() = 0;
virtual void init(RGWSI_MetaBackend_Handler *h) = 0;
};
virtual Context *alloc_ctx() = 0;
struct PutParams {
ceph::real_time mtime;
PutParams() {}
PutParams(const ceph::real_time& _mtime) : mtime(_mtime) {}
virtual ~PutParams() = 0;
};
struct GetParams {
GetParams() {}
GetParams(ceph::real_time *_pmtime) : pmtime(_pmtime) {}
virtual ~GetParams();
ceph::real_time *pmtime{nullptr};
};
struct RemoveParams {
virtual ~RemoveParams() = 0;
ceph::real_time mtime;
};
struct MutateParams {
ceph::real_time mtime;
RGWMDLogStatus op_type;
MutateParams() {}
MutateParams(const ceph::real_time& _mtime,
RGWMDLogStatus _op_type) : mtime(_mtime), op_type(_op_type) {}
virtual ~MutateParams() {}
};
enum Type {
MDBE_SOBJ = 0,
MDBE_OTP = 1,
};
RGWSI_MetaBackend(CephContext *cct) : RGWServiceInstance(cct) {}
virtual ~RGWSI_MetaBackend() {}
virtual Type get_type() = 0;
virtual RGWSI_MetaBackend_Handler *alloc_be_handler() = 0;
virtual int call_with_get_params(ceph::real_time *pmtime, std::function<int(RGWSI_MetaBackend::GetParams&)>) = 0;
/* these should be implemented by backends */
virtual int get_entry(RGWSI_MetaBackend::Context *ctx,
const std::string& key,
RGWSI_MetaBackend::GetParams& params,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp,
bool get_raw_attrs=false) = 0;
virtual int put_entry(const DoutPrefixProvider *dpp,
RGWSI_MetaBackend::Context *ctx,
const std::string& key,
RGWSI_MetaBackend::PutParams& params,
RGWObjVersionTracker *objv_tracker,
optional_yield y) = 0;
virtual int remove_entry(const DoutPrefixProvider *dpp,
Context *ctx,
const std::string& key,
RGWSI_MetaBackend::RemoveParams& params,
RGWObjVersionTracker *objv_tracker,
optional_yield y) = 0;
virtual int list_init(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const std::string& marker) = 0;
virtual int list_next(const DoutPrefixProvider *dpp,
RGWSI_MetaBackend::Context *ctx,
int max, std::list<std::string> *keys,
bool *truncated) = 0;
virtual int list_get_marker(RGWSI_MetaBackend::Context *ctx,
std::string *marker) = 0;
int call(std::function<int(RGWSI_MetaBackend::Context *)> f) {
return call(std::nullopt, f);
}
virtual int call(std::optional<RGWSI_MetaBackend_CtxParams> opt,
std::function<int(RGWSI_MetaBackend::Context *)> f) = 0;
virtual int get_shard_id(RGWSI_MetaBackend::Context *ctx,
const std::string& key,
int *shard_id) = 0;
/* higher level */
virtual int get(Context *ctx,
const std::string& key,
GetParams ¶ms,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp,
bool get_raw_attrs=false);
virtual int put(Context *ctx,
const std::string& key,
PutParams& params,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp);
virtual int remove(Context *ctx,
const std::string& key,
RemoveParams& params,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp);
virtual int mutate(Context *ctx,
const std::string& key,
MutateParams& params,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
std::function<int()> f,
const DoutPrefixProvider *dpp);
};
class RGWSI_MetaBackend_Handler {
RGWSI_MetaBackend *be{nullptr};
public:
class Op {
friend class RGWSI_MetaBackend_Handler;
RGWSI_MetaBackend *be;
RGWSI_MetaBackend::Context *be_ctx;
Op(RGWSI_MetaBackend *_be,
RGWSI_MetaBackend::Context *_ctx) : be(_be), be_ctx(_ctx) {}
public:
RGWSI_MetaBackend::Context *ctx() {
return be_ctx;
}
int get(const std::string& key,
RGWSI_MetaBackend::GetParams ¶ms,
RGWObjVersionTracker *objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) {
return be->get(be_ctx, key, params, objv_tracker, y, dpp);
}
int put(const std::string& key,
RGWSI_MetaBackend::PutParams& params,
RGWObjVersionTracker *objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) {
return be->put(be_ctx, key, params, objv_tracker, y, dpp);
}
int remove(const std::string& key,
RGWSI_MetaBackend::RemoveParams& params,
RGWObjVersionTracker *objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) {
return be->remove(be_ctx, key, params, objv_tracker, y, dpp);
}
int mutate(const std::string& key,
RGWSI_MetaBackend::MutateParams& params,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
std::function<int()> f,
const DoutPrefixProvider *dpp) {
return be->mutate(be_ctx, key, params, objv_tracker, y, f, dpp);
}
int list_init(const DoutPrefixProvider *dpp, const std::string& marker) {
return be->list_init(dpp, be_ctx, marker);
}
int list_next(const DoutPrefixProvider *dpp, int max, std::list<std::string> *keys,
bool *truncated) {
return be->list_next(dpp, be_ctx, max, keys, truncated);
}
int list_get_marker(std::string *marker) {
return be->list_get_marker(be_ctx, marker);
}
int get_shard_id(const std::string& key, int *shard_id) {
return be->get_shard_id(be_ctx, key, shard_id);
}
};
class Op_ManagedCtx : public Op {
std::unique_ptr<RGWSI_MetaBackend::Context> pctx;
public:
Op_ManagedCtx(RGWSI_MetaBackend_Handler *handler);
};
RGWSI_MetaBackend_Handler(RGWSI_MetaBackend *_be) : be(_be) {}
virtual ~RGWSI_MetaBackend_Handler() {}
int call(std::function<int(Op *)> f) {
return call(std::nullopt, f);
}
virtual int call(std::optional<RGWSI_MetaBackend_CtxParams> bectx_params,
std::function<int(Op *)> f);
};
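/*
 * Editorial sketch, not part of the original Ceph header: the usual call
 * pattern against a metadata backend handler. call() sets up a backend
 * Context, wraps it in an Op, and runs the supplied function; the Op forwards
 * get/put/remove/mutate to the owning backend. The helper name is
 * illustrative and error handling is left to the caller.
 */
static inline int example_get_entry(RGWSI_MetaBackend_Handler *handler,
                                    const std::string& key,
                                    RGWSI_MetaBackend::GetParams& params,
                                    RGWObjVersionTracker *objv_tracker,
                                    optional_yield y,
                                    const DoutPrefixProvider *dpp)
{
  return handler->call([&](RGWSI_MetaBackend_Handler::Op *op) {
    return op->get(key, params, objv_tracker, y, dpp);
  });
}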
| 9,488 | 31.166102 | 119 | h |
null | ceph-main/src/rgw/services/svc_meta_be_otp.h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "rgw_service.h"
#include "svc_cls.h"
#include "svc_meta_be.h"
#include "svc_meta_be_sobj.h"
#include "svc_sys_obj.h"
using RGWSI_MBOTP_Handler_Module = RGWSI_MBSObj_Handler_Module;
using RGWSI_MetaBackend_Handler_OTP = RGWSI_MetaBackend_Handler_SObj;
using otp_devices_list_t = std::list<rados::cls::otp::otp_info_t>;
struct RGWSI_MBOTP_GetParams : public RGWSI_MetaBackend::GetParams {
otp_devices_list_t *pdevices{nullptr};
};
struct RGWSI_MBOTP_PutParams : public RGWSI_MetaBackend::PutParams {
otp_devices_list_t devices;
};
using RGWSI_MBOTP_RemoveParams = RGWSI_MBSObj_RemoveParams;
class RGWSI_MetaBackend_OTP : public RGWSI_MetaBackend_SObj
{
RGWSI_Cls *cls_svc{nullptr};
public:
struct Context_OTP : public RGWSI_MetaBackend_SObj::Context_SObj {
otp_devices_list_t devices;
};
RGWSI_MetaBackend_OTP(CephContext *cct);
virtual ~RGWSI_MetaBackend_OTP();
RGWSI_MetaBackend::Type get_type() {
return MDBE_OTP;
}
static std::string get_meta_key(const rgw_user& user);
void init(RGWSI_SysObj *_sysobj_svc,
RGWSI_MDLog *_mdlog_svc,
RGWSI_Cls *_cls_svc) {
RGWSI_MetaBackend_SObj::init(_sysobj_svc, _mdlog_svc);
cls_svc = _cls_svc;
}
RGWSI_MetaBackend_Handler *alloc_be_handler() override;
RGWSI_MetaBackend::Context *alloc_ctx() override;
int call_with_get_params(ceph::real_time *pmtime, std::function<int(RGWSI_MetaBackend::GetParams&)> cb) override;
int get_entry(RGWSI_MetaBackend::Context *ctx,
const std::string& key,
RGWSI_MetaBackend::GetParams& _params,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp,
bool get_raw_attrs=false);
int put_entry(const DoutPrefixProvider *dpp,
RGWSI_MetaBackend::Context *ctx,
const std::string& key,
RGWSI_MetaBackend::PutParams& _params,
RGWObjVersionTracker *objv_tracker,
optional_yield y);
};
| 2,477 | 26.533333 | 115 | h |
null | ceph-main/src/rgw/services/svc_meta_be_params.h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <variant>
struct RGWSI_MetaBackend_CtxParams_SObj {};
using RGWSI_MetaBackend_CtxParams = std::variant<RGWSI_MetaBackend_CtxParams_SObj>;
| 578 | 21.269231 | 83 | h |
null | ceph-main/src/rgw/services/svc_meta_be_sobj.h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "rgw_service.h"
#include "svc_meta_be.h"
#include "svc_sys_obj.h"
class RGWSI_MBSObj_Handler_Module : public RGWSI_MetaBackend::Module {
protected:
std::string section;
public:
RGWSI_MBSObj_Handler_Module(const std::string& _section) : section(_section) {}
virtual void get_pool_and_oid(const std::string& key, rgw_pool *pool, std::string *oid) = 0;
virtual const std::string& get_oid_prefix() = 0;
virtual std::string key_to_oid(const std::string& key) = 0;
virtual bool is_valid_oid(const std::string& oid) = 0;
virtual std::string oid_to_key(const std::string& oid) = 0;
const std::string& get_section() {
return section;
}
/* key to use for hashing entries for log shard placement */
virtual std::string get_hash_key(const std::string& key) {
return section + ":" + key;
}
};
struct RGWSI_MBSObj_GetParams : public RGWSI_MetaBackend::GetParams {
bufferlist *pbl{nullptr};
std::map<std::string, bufferlist> *pattrs{nullptr};
rgw_cache_entry_info *cache_info{nullptr};
boost::optional<obj_version> refresh_version;
RGWSI_MBSObj_GetParams() {}
RGWSI_MBSObj_GetParams(bufferlist *_pbl,
std::map<std::string, bufferlist> *_pattrs,
ceph::real_time *_pmtime) : RGWSI_MetaBackend::GetParams(_pmtime),
pbl(_pbl),
pattrs(_pattrs) {}
RGWSI_MBSObj_GetParams& set_cache_info(rgw_cache_entry_info *_cache_info) {
cache_info = _cache_info;
return *this;
}
RGWSI_MBSObj_GetParams& set_refresh_version(boost::optional<obj_version>& _refresh_version) {
refresh_version = _refresh_version;
return *this;
}
};
struct RGWSI_MBSObj_PutParams : public RGWSI_MetaBackend::PutParams {
bufferlist bl;
std::map<std::string, bufferlist> *pattrs{nullptr};
bool exclusive{false};
RGWSI_MBSObj_PutParams() {}
RGWSI_MBSObj_PutParams(std::map<std::string, bufferlist> *_pattrs,
const ceph::real_time& _mtime) : RGWSI_MetaBackend::PutParams(_mtime),
pattrs(_pattrs) {}
RGWSI_MBSObj_PutParams(bufferlist& _bl,
std::map<std::string, bufferlist> *_pattrs,
const ceph::real_time& _mtime,
bool _exclusive) : RGWSI_MetaBackend::PutParams(_mtime),
bl(_bl),
pattrs(_pattrs),
exclusive(_exclusive) {}
};
struct RGWSI_MBSObj_RemoveParams : public RGWSI_MetaBackend::RemoveParams {
};
class RGWSI_MetaBackend_SObj : public RGWSI_MetaBackend
{
protected:
RGWSI_SysObj *sysobj_svc{nullptr};
public:
struct Context_SObj : public RGWSI_MetaBackend::Context {
RGWSI_MBSObj_Handler_Module *module{nullptr};
struct _list {
std::optional<RGWSI_SysObj::Pool> pool;
std::optional<RGWSI_SysObj::Pool::Op> op;
} list;
void init(RGWSI_MetaBackend_Handler *h) override;
};
RGWSI_MetaBackend_SObj(CephContext *cct);
virtual ~RGWSI_MetaBackend_SObj();
RGWSI_MetaBackend::Type get_type() {
return MDBE_SOBJ;
}
void init(RGWSI_SysObj *_sysobj_svc,
RGWSI_MDLog *_mdlog_svc) {
base_init(_mdlog_svc);
sysobj_svc = _sysobj_svc;
}
RGWSI_MetaBackend_Handler *alloc_be_handler() override;
RGWSI_MetaBackend::Context *alloc_ctx() override;
int call_with_get_params(ceph::real_time *pmtime, std::function<int(RGWSI_MetaBackend::GetParams&)> cb) override;
int pre_modify(const DoutPrefixProvider *dpp,
RGWSI_MetaBackend::Context *ctx,
const std::string& key,
RGWMetadataLogData& log_data,
RGWObjVersionTracker *objv_tracker,
RGWMDLogStatus op_type,
optional_yield y);
int post_modify(const DoutPrefixProvider *dpp,
RGWSI_MetaBackend::Context *ctx,
const std::string& key,
RGWMetadataLogData& log_data,
RGWObjVersionTracker *objv_tracker, int ret,
optional_yield y);
int get_entry(RGWSI_MetaBackend::Context *ctx,
const std::string& key,
RGWSI_MetaBackend::GetParams& params,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp,
bool get_raw_attrs=false) override;
int put_entry(const DoutPrefixProvider *dpp,
RGWSI_MetaBackend::Context *ctx,
const std::string& key,
RGWSI_MetaBackend::PutParams& params,
RGWObjVersionTracker *objv_tracker,
optional_yield y) override;
int remove_entry(const DoutPrefixProvider *dpp,
RGWSI_MetaBackend::Context *ctx,
const std::string& key,
RGWSI_MetaBackend::RemoveParams& params,
RGWObjVersionTracker *objv_tracker,
optional_yield y) override;
int list_init(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *_ctx, const std::string& marker) override;
int list_next(const DoutPrefixProvider *dpp,
RGWSI_MetaBackend::Context *_ctx,
int max, std::list<std::string> *keys,
bool *truncated) override;
int list_get_marker(RGWSI_MetaBackend::Context *ctx,
std::string *marker) override;
int get_shard_id(RGWSI_MetaBackend::Context *ctx,
const std::string& key,
int *shard_id) override;
int call(std::optional<RGWSI_MetaBackend_CtxParams> opt,
std::function<int(RGWSI_MetaBackend::Context *)> f) override;
};
class RGWSI_MetaBackend_Handler_SObj : public RGWSI_MetaBackend_Handler {
friend class RGWSI_MetaBackend_SObj::Context_SObj;
RGWSI_MBSObj_Handler_Module *module{nullptr};
public:
RGWSI_MetaBackend_Handler_SObj(RGWSI_MetaBackend *be) :
RGWSI_MetaBackend_Handler(be) {}
void set_module(RGWSI_MBSObj_Handler_Module *_module) {
module = _module;
}
RGWSI_MBSObj_Handler_Module *get_module() {
return module;
}
};
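/*
 * Editorial sketch, not part of the original Ceph header: building the SObj
 * read parameters with the chained setters above. Both setters return *this,
 * so a cache-info request and a forced refresh past a known version can be
 * chained. Names are illustrative.
 */
static inline RGWSI_MBSObj_GetParams
example_read_params(bufferlist *pbl,
                    std::map<std::string, bufferlist> *pattrs,
                    ceph::real_time *pmtime,
                    rgw_cache_entry_info *cache_info,
                    boost::optional<obj_version>& refresh_version)
{
  RGWSI_MBSObj_GetParams params(pbl, pattrs, pmtime);
  params.set_cache_info(cache_info)
        .set_refresh_version(refresh_version);
  return params;
}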
| 6,733 | 33.533333 | 117 | h |
null | ceph-main/src/rgw/services/svc_notify.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "rgw_service.h"
#include "svc_rados.h"
class Context;
class RGWSI_Zone;
class RGWSI_Finisher;
class RGWWatcher;
class RGWSI_Notify_ShutdownCB;
struct RGWCacheNotifyInfo;
class RGWSI_Notify : public RGWServiceInstance
{
friend class RGWWatcher;
friend class RGWSI_Notify_ShutdownCB;
friend class RGWServices_Def;
public:
class CB;
private:
RGWSI_Zone *zone_svc{nullptr};
RGWSI_RADOS *rados_svc{nullptr};
RGWSI_Finisher *finisher_svc{nullptr};
ceph::shared_mutex watchers_lock = ceph::make_shared_mutex("watchers_lock");
rgw_pool control_pool;
int num_watchers{0};
RGWWatcher **watchers{nullptr};
std::set<int> watchers_set;
std::vector<RGWSI_RADOS::Obj> notify_objs;
bool enabled{false};
double inject_notify_timeout_probability{0};
static constexpr unsigned max_notify_retries = 10;
std::string get_control_oid(int i);
RGWSI_RADOS::Obj pick_control_obj(const std::string& key);
CB *cb{nullptr};
std::optional<int> finisher_handle;
RGWSI_Notify_ShutdownCB *shutdown_cb{nullptr};
bool finalized{false};
int init_watch(const DoutPrefixProvider *dpp, optional_yield y);
void finalize_watch();
void init(RGWSI_Zone *_zone_svc,
RGWSI_RADOS *_rados_svc,
RGWSI_Finisher *_finisher_svc) {
zone_svc = _zone_svc;
rados_svc = _rados_svc;
finisher_svc = _finisher_svc;
}
int do_start(optional_yield, const DoutPrefixProvider *dpp) override;
void shutdown() override;
int unwatch(RGWSI_RADOS::Obj& obj, uint64_t watch_handle);
void add_watcher(int i);
void remove_watcher(int i);
int watch_cb(const DoutPrefixProvider *dpp,
uint64_t notify_id,
uint64_t cookie,
uint64_t notifier_id,
bufferlist& bl);
void _set_enabled(bool status);
void set_enabled(bool status);
int robust_notify(const DoutPrefixProvider *dpp, RGWSI_RADOS::Obj& notify_obj,
const RGWCacheNotifyInfo& bl, optional_yield y);
void schedule_context(Context *c);
public:
RGWSI_Notify(CephContext *cct): RGWServiceInstance(cct) {}
virtual ~RGWSI_Notify() override;
class CB {
public:
virtual ~CB() {}
virtual int watch_cb(const DoutPrefixProvider *dpp,
uint64_t notify_id,
uint64_t cookie,
uint64_t notifier_id,
bufferlist& bl) = 0;
virtual void set_enabled(bool status) = 0;
};
int distribute(const DoutPrefixProvider *dpp, const std::string& key, const RGWCacheNotifyInfo& bl,
optional_yield y);
void register_watch_cb(CB *cb);
};
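/*
 * Editorial sketch, not part of the original Ceph header: a minimal CB
 * implementation. RGWSI_SysObj_Cache registers a callback of this shape (its
 * RGWSI_SysObj_Cache_CB) so that distribute()d notifications reach the cache.
 * The class name is illustrative.
 */
class ExampleNotifyCB : public RGWSI_Notify::CB {
public:
  int watch_cb(const DoutPrefixProvider *dpp,
               uint64_t notify_id,
               uint64_t cookie,
               uint64_t notifier_id,
               bufferlist& bl) override {
    // Decode bl and react to the broadcast here.
    return 0;
  }
  void set_enabled(bool status) override {
    // Toggle local state when the watch/notify machinery is (re)started or torn down.
  }
};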
| 2,755 | 24.757009 | 101 | h |
null | ceph-main/src/rgw/services/svc_otp.h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "cls/otp/cls_otp_types.h"
#include "rgw_service.h"
#include "svc_otp_types.h"
#include "svc_meta_be_otp.h"
class RGWSI_Zone;
class RGWSI_OTP : public RGWServiceInstance
{
RGWSI_OTP_BE_Handler be_handler;
std::unique_ptr<RGWSI_MetaBackend::Module> be_module;
int do_start(optional_yield, const DoutPrefixProvider *dpp) override;
public:
struct Svc {
RGWSI_OTP *otp{nullptr};
RGWSI_Zone *zone{nullptr};
RGWSI_Meta *meta{nullptr};
RGWSI_MetaBackend *meta_be{nullptr};
} svc;
RGWSI_OTP(CephContext *cct);
~RGWSI_OTP();
RGWSI_OTP_BE_Handler& get_be_handler() {
return be_handler;
}
void init(RGWSI_Zone *_zone_svc,
RGWSI_Meta *_meta_svc,
RGWSI_MetaBackend *_meta_be_svc);
int read_all(RGWSI_OTP_BE_Ctx& ctx,
const std::string& key,
otp_devices_list_t *devices,
real_time *pmtime,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp);
int read_all(RGWSI_OTP_BE_Ctx& ctx,
const rgw_user& uid,
otp_devices_list_t *devices,
real_time *pmtime,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp);
int store_all(const DoutPrefixProvider *dpp,
RGWSI_OTP_BE_Ctx& ctx,
const std::string& key,
const otp_devices_list_t& devices,
real_time mtime,
RGWObjVersionTracker *objv_tracker,
optional_yield y);
int store_all(const DoutPrefixProvider *dpp,
RGWSI_OTP_BE_Ctx& ctx,
const rgw_user& uid,
const otp_devices_list_t& devices,
real_time mtime,
RGWObjVersionTracker *objv_tracker,
optional_yield y);
int remove_all(const DoutPrefixProvider *dpp,
RGWSI_OTP_BE_Ctx& ctx,
const std::string& key,
RGWObjVersionTracker *objv_tracker,
optional_yield y);
int remove_all(const DoutPrefixProvider *dpp,
RGWSI_OTP_BE_Ctx& ctx,
const rgw_user& uid,
RGWObjVersionTracker *objv_tracker,
optional_yield y);
};
| 2,780 | 27.96875 | 71 | h |
null | ceph-main/src/rgw/services/svc_otp_types.h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "common/ptr_wrapper.h"
#include "svc_meta_be.h"
#include "svc_meta_be_types.h"
class RGWSI_MetaBackend_Handler;
using RGWSI_OTP_BE_Handler = ptr_wrapper<RGWSI_MetaBackend_Handler, RGWSI_META_BE_TYPES::OTP>;
using RGWSI_OTP_BE_Ctx = ptr_wrapper<RGWSI_MetaBackend::Context, RGWSI_META_BE_TYPES::OTP>;
| 740 | 23.7 | 94 | h |
null | ceph-main/src/rgw/services/svc_rados.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "rgw_service.h"
#include "include/rados/librados.hpp"
#include "common/async/yield_context.h"
class RGWAsyncRadosProcessor;
class RGWAccessListFilter {
public:
virtual ~RGWAccessListFilter() {}
virtual bool filter(const std::string& name, std::string& key) = 0;
};
struct RGWAccessListFilterPrefix : public RGWAccessListFilter {
std::string prefix;
explicit RGWAccessListFilterPrefix(const std::string& _prefix) : prefix(_prefix) {}
bool filter(const std::string& name, std::string& key) override {
return (prefix.compare(key.substr(0, prefix.size())) == 0);
}
};
class RGWSI_RADOS : public RGWServiceInstance
{
librados::Rados rados;
std::unique_ptr<RGWAsyncRadosProcessor> async_processor;
int do_start(optional_yield, const DoutPrefixProvider *dpp) override;
public:
struct OpenParams {
bool create{true};
bool mostly_omap{false};
OpenParams() {}
OpenParams& set_create(bool _create) {
create = _create;
return *this;
}
OpenParams& set_mostly_omap(bool _mostly_omap) {
mostly_omap = _mostly_omap;
return *this;
}
};
private:
int open_pool_ctx(const DoutPrefixProvider *dpp, const rgw_pool& pool, librados::IoCtx& io_ctx,
const OpenParams& params = {});
int pool_iterate(const DoutPrefixProvider *dpp,
librados::IoCtx& ioctx,
librados::NObjectIterator& iter,
uint32_t num, std::vector<rgw_bucket_dir_entry>& objs,
RGWAccessListFilter *filter,
bool *is_truncated);
public:
RGWSI_RADOS(CephContext *cct);
~RGWSI_RADOS();
librados::Rados* get_rados_handle();
void init() {}
void shutdown() override;
void stop_processor();
std::string cluster_fsid();
uint64_t instance_id();
bool check_secure_mon_conn(const DoutPrefixProvider *dpp) const;
RGWAsyncRadosProcessor *get_async_processor() {
return async_processor.get();
}
int clog_warn(const std::string& msg);
class Handle;
class Pool {
friend class RGWSI_RADOS;
friend Handle;
friend class Obj;
RGWSI_RADOS *rados_svc{nullptr};
rgw_pool pool;
struct State {
librados::IoCtx ioctx;
} state;
Pool(RGWSI_RADOS *_rados_svc,
const rgw_pool& _pool) : rados_svc(_rados_svc),
pool(_pool) {}
Pool(RGWSI_RADOS *_rados_svc) : rados_svc(_rados_svc) {}
public:
Pool() {}
int create(const DoutPrefixProvider *dpp);
int create(const DoutPrefixProvider *dpp, const std::vector<rgw_pool>& pools, std::vector<int> *retcodes);
int lookup();
int open(const DoutPrefixProvider *dpp, const OpenParams& params = {});
const rgw_pool& get_pool() {
return pool;
}
librados::IoCtx& ioctx() & {
return state.ioctx;
}
librados::IoCtx&& ioctx() && {
return std::move(state.ioctx);
}
struct List {
Pool *pool{nullptr};
struct Ctx {
bool initialized{false};
librados::IoCtx ioctx;
librados::NObjectIterator iter;
RGWAccessListFilter *filter{nullptr};
} ctx;
List() {}
List(Pool *_pool) : pool(_pool) {}
int init(const DoutPrefixProvider *dpp, const std::string& marker, RGWAccessListFilter *filter = nullptr);
int get_next(const DoutPrefixProvider *dpp, int max,
std::vector<std::string> *oids,
bool *is_truncated);
int get_marker(std::string *marker);
};
List op() {
return List(this);
}
friend List;
};
struct rados_ref {
RGWSI_RADOS::Pool pool;
rgw_raw_obj obj;
};
class Obj {
friend class RGWSI_RADOS;
friend class Handle;
RGWSI_RADOS *rados_svc{nullptr};
rados_ref ref;
void init(const rgw_raw_obj& obj);
Obj(RGWSI_RADOS *_rados_svc, const rgw_raw_obj& _obj)
: rados_svc(_rados_svc) {
init(_obj);
}
Obj(Pool& pool, const std::string& oid);
public:
Obj() {}
int open(const DoutPrefixProvider *dpp);
int operate(const DoutPrefixProvider *dpp, librados::ObjectWriteOperation *op, optional_yield y,
int flags = 0);
int operate(const DoutPrefixProvider *dpp, librados::ObjectReadOperation *op, bufferlist *pbl,
optional_yield y, int flags = 0);
int aio_operate(librados::AioCompletion *c, librados::ObjectWriteOperation *op);
int aio_operate(librados::AioCompletion *c, librados::ObjectReadOperation *op,
bufferlist *pbl);
int watch(uint64_t *handle, librados::WatchCtx2 *ctx);
int aio_watch(librados::AioCompletion *c, uint64_t *handle, librados::WatchCtx2 *ctx);
int unwatch(uint64_t handle);
int notify(const DoutPrefixProvider *dpp, bufferlist& bl, uint64_t timeout_ms,
bufferlist *pbl, optional_yield y);
void notify_ack(uint64_t notify_id,
uint64_t cookie,
bufferlist& bl);
uint64_t get_last_version();
rados_ref& get_ref() { return ref; }
const rados_ref& get_ref() const { return ref; }
const rgw_raw_obj& get_raw_obj() const {
return ref.obj;
}
};
class Handle {
friend class RGWSI_RADOS;
RGWSI_RADOS *rados_svc{nullptr};
Handle(RGWSI_RADOS *_rados_svc) : rados_svc(_rados_svc) {}
public:
Obj obj(const rgw_raw_obj& o);
Pool pool(const rgw_pool& p) {
return Pool(rados_svc, p);
}
int watch_flush();
int mon_command(std::string cmd,
const bufferlist& inbl,
bufferlist *outbl,
std::string *outs);
};
Handle handle() {
return Handle(this);
}
Obj obj(const rgw_raw_obj& o) {
return Obj(this, o);
}
Obj obj(Pool& pool, const std::string& oid) {
return Obj(pool, oid);
}
Pool pool() {
return Pool(this);
}
Pool pool(const rgw_pool& p) {
return Pool(this, p);
}
friend Obj;
friend Pool;
friend Pool::List;
};
using rgw_rados_ref = RGWSI_RADOS::rados_ref;
inline std::ostream& operator<<(std::ostream& out, const RGWSI_RADOS::Obj& obj) {
return out << obj.get_raw_obj();
}
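/*
 * Editorial sketch, not part of the original Ceph header: reading a raw
 * object through the RADOS service wrapper. Assumes an initialized
 * RGWSI_RADOS; the helper name is illustrative and error handling beyond the
 * open() check is left to the caller.
 */
static inline int example_read_raw_obj(const DoutPrefixProvider *dpp,
                                       RGWSI_RADOS& rados_svc,
                                       const rgw_raw_obj& raw,
                                       bufferlist *pbl,
                                       optional_yield y)
{
  RGWSI_RADOS::Obj obj = rados_svc.obj(raw);
  int r = obj.open(dpp);        // resolves the pool and prepares the IoCtx
  if (r < 0) {
    return r;
  }
  librados::ObjectReadOperation op;
  op.read(0, 0, pbl, nullptr);  // offset 0, length 0: read to the end of the object
  return obj.operate(dpp, &op, pbl, y);
}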
| 6,282 | 23.833992 | 112 | h |
null | ceph-main/src/rgw/services/svc_role_rados.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 SUSE LLC
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "rgw_service.h"
#include "rgw_role.h"
#include "svc_meta_be.h"
class RGWSI_Role_RADOS: public RGWServiceInstance
{
public:
struct Svc {
RGWSI_Zone *zone{nullptr};
RGWSI_Meta *meta{nullptr};
RGWSI_MetaBackend *meta_be{nullptr};
RGWSI_SysObj *sysobj{nullptr};
} svc;
RGWSI_Role_RADOS(CephContext *cct) : RGWServiceInstance(cct) {}
~RGWSI_Role_RADOS() {}
void init(RGWSI_Zone *_zone_svc,
RGWSI_Meta *_meta_svc,
RGWSI_MetaBackend *_meta_be_svc,
RGWSI_SysObj *_sysobj_svc);
RGWSI_MetaBackend_Handler * get_be_handler();
int do_start(optional_yield y, const DoutPrefixProvider *dpp) override;
private:
RGWSI_MetaBackend_Handler *be_handler;
std::unique_ptr<RGWSI_MetaBackend::Module> be_module;
};
static const std::string role_name_oid_prefix = "role_names.";
static const std::string role_oid_prefix = "roles.";
static const std::string role_path_oid_prefix = "role_paths.";
| 1,357 | 25.627451 | 73 | h |
null | ceph-main/src/rgw/services/svc_sync_modules.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "rgw_service.h"
#include "rgw_sync_module.h"
class RGWSI_Zone;
class RGWSyncModulesManager;
class RGWSI_SyncModules : public RGWServiceInstance
{
RGWSyncModulesManager *sync_modules_manager{nullptr};
RGWSyncModuleInstanceRef sync_module;
struct Svc {
RGWSI_Zone *zone{nullptr};
} svc;
public:
RGWSI_SyncModules(CephContext *cct): RGWServiceInstance(cct) {}
~RGWSI_SyncModules();
RGWSyncModulesManager *get_manager() {
return sync_modules_manager;
}
void init(RGWSI_Zone *zone_svc);
int do_start(optional_yield, const DoutPrefixProvider *dpp) override;
RGWSyncModuleInstanceRef& get_sync_module() { return sync_module; }
};
| 790 | 21.6 | 71 | h |
null | ceph-main/src/rgw/services/svc_sys_obj.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "common/static_ptr.h"
#include "rgw_service.h"
#include "svc_rados.h"
#include "svc_sys_obj_types.h"
#include "svc_sys_obj_core_types.h"
class RGWSI_Zone;
class RGWSI_SysObj;
struct rgw_cache_entry_info;
class RGWSI_SysObj : public RGWServiceInstance
{
friend struct RGWServices_Def;
public:
class Obj {
friend class ROp;
RGWSI_SysObj_Core *core_svc;
rgw_raw_obj obj;
public:
Obj(RGWSI_SysObj_Core *_core_svc, const rgw_raw_obj& _obj)
: core_svc(_core_svc), obj(_obj) {}
rgw_raw_obj& get_obj() {
return obj;
}
struct ROp {
Obj& source;
ceph::static_ptr<RGWSI_SysObj_Obj_GetObjState, sizeof(RGWSI_SysObj_Core_GetObjState)> state;
RGWObjVersionTracker *objv_tracker{nullptr};
std::map<std::string, bufferlist> *attrs{nullptr};
bool raw_attrs{false};
boost::optional<obj_version> refresh_version{boost::none};
ceph::real_time *lastmod{nullptr};
uint64_t *obj_size{nullptr};
rgw_cache_entry_info *cache_info{nullptr};
ROp& set_objv_tracker(RGWObjVersionTracker *_objv_tracker) {
objv_tracker = _objv_tracker;
return *this;
}
ROp& set_last_mod(ceph::real_time *_lastmod) {
lastmod = _lastmod;
return *this;
}
ROp& set_obj_size(uint64_t *_obj_size) {
obj_size = _obj_size;
return *this;
}
ROp& set_attrs(std::map<std::string, bufferlist> *_attrs) {
attrs = _attrs;
return *this;
}
ROp& set_raw_attrs(bool ra) {
raw_attrs = ra;
return *this;
}
ROp& set_refresh_version(boost::optional<obj_version>& rf) {
refresh_version = rf;
return *this;
}
ROp& set_cache_info(rgw_cache_entry_info *ci) {
cache_info = ci;
return *this;
}
ROp(Obj& _source);
int stat(optional_yield y, const DoutPrefixProvider *dpp);
int read(const DoutPrefixProvider *dpp, int64_t ofs, int64_t end, bufferlist *pbl, optional_yield y);
int read(const DoutPrefixProvider *dpp, bufferlist *pbl, optional_yield y) {
return read(dpp, 0, -1, pbl, y);
}
int get_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist *dest, optional_yield y);
};
struct WOp {
Obj& source;
RGWObjVersionTracker *objv_tracker{nullptr};
std::map<std::string, bufferlist> attrs;
ceph::real_time mtime;
ceph::real_time *pmtime{nullptr};
bool exclusive{false};
WOp& set_objv_tracker(RGWObjVersionTracker *_objv_tracker) {
objv_tracker = _objv_tracker;
return *this;
}
WOp& set_attrs(std::map<std::string, bufferlist>& _attrs) {
attrs = _attrs;
return *this;
}
WOp& set_attrs(std::map<std::string, bufferlist>&& _attrs) {
attrs = _attrs;
return *this;
}
WOp& set_mtime(const ceph::real_time& _mtime) {
mtime = _mtime;
return *this;
}
WOp& set_pmtime(ceph::real_time *_pmtime) {
pmtime = _pmtime;
return *this;
}
WOp& set_exclusive(bool _exclusive = true) {
exclusive = _exclusive;
return *this;
}
WOp(Obj& _source) : source(_source) {}
int remove(const DoutPrefixProvider *dpp, optional_yield y);
int write(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y);
int write_data(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y); /* write data only */
int write_attrs(const DoutPrefixProvider *dpp, optional_yield y); /* write attrs only */
int write_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist& bl,
optional_yield y); /* write attrs only */
};
struct OmapOp {
Obj& source;
bool must_exist{false};
OmapOp& set_must_exist(bool _must_exist = true) {
must_exist = _must_exist;
return *this;
}
OmapOp(Obj& _source) : source(_source) {}
int get_all(const DoutPrefixProvider *dpp, std::map<std::string, bufferlist> *m, optional_yield y);
int get_vals(const DoutPrefixProvider *dpp, const std::string& marker, uint64_t count,
std::map<std::string, bufferlist> *m,
bool *pmore, optional_yield y);
int set(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& bl, optional_yield y);
int set(const DoutPrefixProvider *dpp, const std::map<std::string, bufferlist>& m, optional_yield y);
int del(const DoutPrefixProvider *dpp, const std::string& key, optional_yield y);
};
struct WNOp {
Obj& source;
WNOp(Obj& _source) : source(_source) {}
int notify(const DoutPrefixProvider *dpp, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl,
optional_yield y);
};
ROp rop() {
return ROp(*this);
}
WOp wop() {
return WOp(*this);
}
OmapOp omap() {
return OmapOp(*this);
}
WNOp wn() {
return WNOp(*this);
}
};
class Pool {
friend class Op;
friend class RGWSI_SysObj_Core;
RGWSI_SysObj_Core *core_svc;
rgw_pool pool;
protected:
using ListImplInfo = RGWSI_SysObj_Pool_ListInfo;
struct ListCtx {
ceph::static_ptr<ListImplInfo, sizeof(RGWSI_SysObj_Core_PoolListImplInfo)> impl; /* update this if creating new backend types */
};
public:
Pool(RGWSI_SysObj_Core *_core_svc,
const rgw_pool& _pool) : core_svc(_core_svc),
pool(_pool) {}
rgw_pool& get_pool() {
return pool;
}
struct Op {
Pool& source;
ListCtx ctx;
Op(Pool& _source) : source(_source) {}
int init(const DoutPrefixProvider *dpp, const std::string& marker, const std::string& prefix);
int get_next(const DoutPrefixProvider *dpp, int max, std::vector<std::string> *oids, bool *is_truncated);
int get_marker(std::string *marker);
};
int list_prefixed_objs(const DoutPrefixProvider *dpp, const std::string& prefix, std::function<void(const std::string&)> cb);
template <typename Container>
int list_prefixed_objs(const DoutPrefixProvider *dpp, const std::string& prefix,
Container *result) {
return list_prefixed_objs(dpp, prefix, [&](const std::string& val) {
result->push_back(val);
});
}
Op op() {
return Op(*this);
}
};
friend class Obj;
friend class Obj::ROp;
friend class Obj::WOp;
friend class Pool;
friend class Pool::Op;
protected:
RGWSI_RADOS *rados_svc{nullptr};
RGWSI_SysObj_Core *core_svc{nullptr};
void init(RGWSI_RADOS *_rados_svc,
RGWSI_SysObj_Core *_core_svc) {
rados_svc = _rados_svc;
core_svc = _core_svc;
}
public:
RGWSI_SysObj(CephContext *cct): RGWServiceInstance(cct) {}
Obj get_obj(const rgw_raw_obj& obj);
Pool get_pool(const rgw_pool& pool) {
return Pool(core_svc, pool);
}
RGWSI_Zone *get_zone_svc();
};
using RGWSysObj = RGWSI_SysObj::Obj;
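/*
 * Editorial sketch, not part of the original Ceph header: the fluent read
 * path, assuming an initialized RGWSI_SysObj. Attribute and mtime outputs are
 * optional; the helper name is illustrative.
 */
static inline int example_sysobj_read(const DoutPrefixProvider *dpp,
                                      RGWSI_SysObj& sysobj_svc,
                                      const rgw_raw_obj& raw,
                                      bufferlist *pbl,
                                      std::map<std::string, bufferlist> *pattrs,
                                      ceph::real_time *pmtime,
                                      optional_yield y)
{
  RGWSI_SysObj::Obj obj = sysobj_svc.get_obj(raw);
  return obj.rop()
            .set_attrs(pattrs)      // also fetch the object's xattrs
            .set_last_mod(pmtime)   // report the object's modification time
            .read(dpp, pbl, y);     // the (dpp, pbl, y) overload reads the whole object
}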
| 7,197 | 25.560886 | 134 | h |
null | ceph-main/src/rgw/services/svc_sys_obj_cache.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "common/RWLock.h"
#include "rgw_service.h"
#include "rgw_cache.h"
#include "svc_sys_obj_core.h"
class RGWSI_Notify;
class RGWSI_SysObj_Cache_CB;
class RGWSI_SysObj_Cache_ASocketHook;
class RGWSI_SysObj_Cache : public RGWSI_SysObj_Core
{
friend class RGWSI_SysObj_Cache_CB;
friend class RGWServices_Def;
friend class ASocketHandler;
RGWSI_Notify *notify_svc{nullptr};
ObjectCache cache;
std::shared_ptr<RGWSI_SysObj_Cache_CB> cb;
void normalize_pool_and_obj(const rgw_pool& src_pool, const std::string& src_obj, rgw_pool& dst_pool, std::string& dst_obj);
protected:
void init(RGWSI_RADOS *_rados_svc,
RGWSI_Zone *_zone_svc,
RGWSI_Notify *_notify_svc) {
core_init(_rados_svc, _zone_svc);
notify_svc = _notify_svc;
}
int do_start(optional_yield, const DoutPrefixProvider *dpp) override;
void shutdown() override;
int raw_stat(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj,
uint64_t *psize, real_time *pmtime,
std::map<std::string, bufferlist> *attrs,
RGWObjVersionTracker *objv_tracker,
optional_yield y) override;
int read(const DoutPrefixProvider *dpp,
RGWSI_SysObj_Obj_GetObjState& read_state,
RGWObjVersionTracker *objv_tracker,
const rgw_raw_obj& obj,
bufferlist *bl, off_t ofs, off_t end,
ceph::real_time* pmtime, uint64_t* psize,
std::map<std::string, bufferlist> *attrs,
bool raw_attrs,
rgw_cache_entry_info *cache_info,
boost::optional<obj_version>,
optional_yield y) override;
int get_attr(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const char *name, bufferlist *dest,
optional_yield y) override;
int set_attrs(const DoutPrefixProvider *dpp,
const rgw_raw_obj& obj,
std::map<std::string, bufferlist>& attrs,
std::map<std::string, bufferlist> *rmattrs,
RGWObjVersionTracker *objv_tracker,
bool exclusive, optional_yield y) override;
int remove(const DoutPrefixProvider *dpp,
RGWObjVersionTracker *objv_tracker,
const rgw_raw_obj& obj,
optional_yield y) override;
int write(const DoutPrefixProvider *dpp,
const rgw_raw_obj& obj,
real_time *pmtime,
std::map<std::string, bufferlist>& attrs,
bool exclusive,
const bufferlist& data,
RGWObjVersionTracker *objv_tracker,
real_time set_mtime,
optional_yield y) override;
int write_data(const DoutPrefixProvider *dpp,
const rgw_raw_obj& obj,
const bufferlist& bl,
bool exclusive,
RGWObjVersionTracker *objv_tracker,
optional_yield y);
int distribute_cache(const DoutPrefixProvider *dpp, const std::string& normal_name, const rgw_raw_obj& obj,
ObjectCacheInfo& obj_info, int op,
optional_yield y);
int watch_cb(const DoutPrefixProvider *dpp,
uint64_t notify_id,
uint64_t cookie,
uint64_t notifier_id,
bufferlist& bl);
void set_enabled(bool status);
public:
RGWSI_SysObj_Cache(const DoutPrefixProvider *dpp, CephContext *cct) : RGWSI_SysObj_Core(cct), asocket(dpp, this) {
cache.set_ctx(cct);
}
bool chain_cache_entry(const DoutPrefixProvider *dpp,
std::initializer_list<rgw_cache_entry_info *> cache_info_entries,
RGWChainedCache::Entry *chained_entry);
void register_chained_cache(RGWChainedCache *cc);
void unregister_chained_cache(RGWChainedCache *cc);
class ASocketHandler {
const DoutPrefixProvider *dpp;
RGWSI_SysObj_Cache *svc;
std::unique_ptr<RGWSI_SysObj_Cache_ASocketHook> hook;
public:
ASocketHandler(const DoutPrefixProvider *dpp, RGWSI_SysObj_Cache *_svc);
~ASocketHandler();
int start();
void shutdown();
// `call_list` must iterate over all cache entries and call
// `cache_list_dump_helper` with the supplied Formatter on any that
// include `filter` as a substring.
//
void call_list(const std::optional<std::string>& filter, Formatter* f);
// `call_inspect` must look up the requested target and, if found,
// dump it to the supplied Formatter and return true. If not found,
// it must return false.
//
int call_inspect(const std::string& target, Formatter* f);
// `call_erase` must erase the requested target and return true. If
// the requested target does not exist, it should return false.
int call_erase(const std::string& target);
// `call_zap` must erase the cache.
int call_zap();
} asocket;
};
template <class T>
class RGWChainedCacheImpl : public RGWChainedCache {
RGWSI_SysObj_Cache *svc{nullptr};
ceph::timespan expiry;
RWLock lock;
std::unordered_map<std::string, std::pair<T, ceph::coarse_mono_time>> entries;
public:
RGWChainedCacheImpl() : lock("RGWChainedCacheImpl::lock") {}
~RGWChainedCacheImpl() {
if (!svc) {
return;
}
svc->unregister_chained_cache(this);
}
void unregistered() override {
svc = nullptr;
}
void init(RGWSI_SysObj_Cache *_svc) {
if (!_svc) {
return;
}
svc = _svc;
svc->register_chained_cache(this);
expiry = std::chrono::seconds(svc->ctx()->_conf.get_val<uint64_t>(
"rgw_cache_expiry_interval"));
}
boost::optional<T> find(const std::string& key) {
std::shared_lock rl{lock};
auto iter = entries.find(key);
if (iter == entries.end()) {
return boost::none;
}
if (expiry.count() &&
(ceph::coarse_mono_clock::now() - iter->second.second) > expiry) {
return boost::none;
}
return iter->second.first;
}
bool put(const DoutPrefixProvider *dpp, RGWSI_SysObj_Cache *svc, const std::string& key, T *entry,
std::initializer_list<rgw_cache_entry_info *> cache_info_entries) {
if (!svc) {
return false;
}
Entry chain_entry(this, key, entry);
/* we need the svc cache to call us under its lock to maintain lock ordering */
return svc->chain_cache_entry(dpp, cache_info_entries, &chain_entry);
}
void chain_cb(const std::string& key, void *data) override {
T *entry = static_cast<T *>(data);
std::unique_lock wl{lock};
entries[key].first = *entry;
if (expiry.count() > 0) {
entries[key].second = ceph::coarse_mono_clock::now();
}
}
void invalidate(const std::string& key) override {
std::unique_lock wl{lock};
entries.erase(key);
}
void invalidate_all() override {
std::unique_lock wl{lock};
entries.clear();
}
}; /* RGWChainedCacheImpl */
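/*
 * Editorial sketch, not part of the original Ceph header: the typical
 * lookup/fill pattern for a chained cache, mirroring how RGWSI_Bucket_SObj
 * uses its binfo_cache. The entry type, helper name and elided store read are
 * illustrative.
 */
struct example_cache_entry {
  bufferlist data;
};

static inline int example_lookup(const DoutPrefixProvider *dpp,
                                 RGWSI_SysObj_Cache *cache_svc,
                                 RGWChainedCacheImpl<example_cache_entry>& cache,
                                 const std::string& key,
                                 example_cache_entry *out)
{
  if (auto hit = cache.find(key)) {   // fast path: serve from the chained cache
    *out = *hit;
    return 0;
  }
  rgw_cache_entry_info cache_info;
  // Slow path (elided): read *out from the backing store while recording
  // cache_info, then link the fresh entry so later invalidations reach it.
  cache.put(dpp, cache_svc, key, out, {&cache_info});
  return 0;
}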
| 6,954 | 30.188341 | 126 | h |
null | ceph-main/src/rgw/services/svc_sys_obj_core.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "rgw_service.h"
#include "svc_rados.h"
#include "svc_sys_obj.h"
#include "svc_sys_obj_core_types.h"
class RGWSI_Zone;
struct rgw_cache_entry_info;
class RGWSI_SysObj_Core : public RGWServiceInstance
{
friend class RGWServices_Def;
friend class RGWSI_SysObj;
protected:
RGWSI_RADOS *rados_svc{nullptr};
RGWSI_Zone *zone_svc{nullptr};
using GetObjState = RGWSI_SysObj_Core_GetObjState;
using PoolListImplInfo = RGWSI_SysObj_Core_PoolListImplInfo;
void core_init(RGWSI_RADOS *_rados_svc,
RGWSI_Zone *_zone_svc) {
rados_svc = _rados_svc;
zone_svc = _zone_svc;
}
int get_rados_obj(const DoutPrefixProvider *dpp, RGWSI_Zone *zone_svc, const rgw_raw_obj& obj, RGWSI_RADOS::Obj *pobj);
virtual int raw_stat(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj,
uint64_t *psize, real_time *pmtime,
std::map<std::string, bufferlist> *attrs,
RGWObjVersionTracker *objv_tracker,
optional_yield y);
virtual int read(const DoutPrefixProvider *dpp,
RGWSI_SysObj_Obj_GetObjState& read_state,
RGWObjVersionTracker *objv_tracker,
const rgw_raw_obj& obj,
bufferlist *bl, off_t ofs, off_t end,
ceph::real_time* pmtime, uint64_t* psize,
std::map<std::string, bufferlist> *attrs,
bool raw_attrs,
rgw_cache_entry_info *cache_info,
boost::optional<obj_version>,
optional_yield y);
virtual int remove(const DoutPrefixProvider *dpp,
RGWObjVersionTracker *objv_tracker,
const rgw_raw_obj& obj,
optional_yield y);
virtual int write(const DoutPrefixProvider *dpp,
const rgw_raw_obj& obj,
real_time *pmtime,
std::map<std::string, bufferlist>& attrs,
bool exclusive,
const bufferlist& data,
RGWObjVersionTracker *objv_tracker,
real_time set_mtime,
optional_yield y);
virtual int write_data(const DoutPrefixProvider *dpp,
const rgw_raw_obj& obj,
const bufferlist& bl,
bool exclusive,
RGWObjVersionTracker *objv_tracker,
optional_yield y);
virtual int get_attr(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj,
const char *name, bufferlist *dest,
optional_yield y);
virtual int set_attrs(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj,
std::map<std::string, bufferlist>& attrs,
std::map<std::string, bufferlist> *rmattrs,
RGWObjVersionTracker *objv_tracker,
bool exclusive, optional_yield y);
virtual int omap_get_all(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, std::map<std::string, bufferlist> *m,
optional_yield y);
virtual int omap_get_vals(const DoutPrefixProvider *dpp,
const rgw_raw_obj& obj,
const std::string& marker,
uint64_t count,
std::map<std::string, bufferlist> *m,
bool *pmore,
optional_yield y);
virtual int omap_set(const DoutPrefixProvider *dpp,
const rgw_raw_obj& obj, const std::string& key,
bufferlist& bl, bool must_exist,
optional_yield y);
virtual int omap_set(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj,
const std::map<std::string, bufferlist>& m, bool must_exist,
optional_yield y);
virtual int omap_del(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const std::string& key,
optional_yield y);
virtual int notify(const DoutPrefixProvider *dpp,
const rgw_raw_obj& obj, bufferlist& bl,
uint64_t timeout_ms, bufferlist *pbl,
optional_yield y);
virtual int pool_list_prefixed_objs(const DoutPrefixProvider *dpp,
const rgw_pool& pool,
const std::string& prefix,
std::function<void(const std::string&)> cb);
virtual int pool_list_objects_init(const DoutPrefixProvider *dpp,
const rgw_pool& pool,
const std::string& marker,
const std::string& prefix,
RGWSI_SysObj::Pool::ListCtx *ctx);
virtual int pool_list_objects_next(const DoutPrefixProvider *dpp,
RGWSI_SysObj::Pool::ListCtx& ctx,
int max,
std::vector<std::string> *oids,
bool *is_truncated);
virtual int pool_list_objects_get_marker(RGWSI_SysObj::Pool::ListCtx& _ctx,
std::string *marker);
int stat(RGWSI_SysObj_Obj_GetObjState& state,
const rgw_raw_obj& obj,
std::map<std::string, bufferlist> *attrs,
bool raw_attrs,
real_time *lastmod,
uint64_t *obj_size,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp);
public:
RGWSI_SysObj_Core(CephContext *cct): RGWServiceInstance(cct) {}
RGWSI_Zone *get_zone_svc() {
return zone_svc;
}
};
| 5,988 | 40.020548 | 121 | h |
null | ceph-main/src/rgw/services/svc_sys_obj_core_types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "rgw_service.h"
#include "svc_rados.h"
#include "svc_sys_obj_types.h"
struct RGWSI_SysObj_Core_GetObjState : public RGWSI_SysObj_Obj_GetObjState {
RGWSI_RADOS::Obj rados_obj;
bool has_rados_obj{false};
uint64_t last_ver{0};
RGWSI_SysObj_Core_GetObjState() {}
int get_rados_obj(const DoutPrefixProvider *dpp,
RGWSI_RADOS *rados_svc,
RGWSI_Zone *zone_svc,
const rgw_raw_obj& obj,
RGWSI_RADOS::Obj **pobj);
};
struct RGWSI_SysObj_Core_PoolListImplInfo : public RGWSI_SysObj_Pool_ListInfo {
RGWSI_RADOS::Pool pool;
RGWSI_RADOS::Pool::List op;
RGWAccessListFilterPrefix filter;
RGWSI_SysObj_Core_PoolListImplInfo(const std::string& prefix) : op(pool.op()), filter(prefix) {}
};
| 909 | 25 | 98 | h |
null | ceph-main/src/rgw/services/svc_tier_rados.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <iomanip>
#include "rgw_service.h"
#include "svc_rados.h"
extern const std::string MP_META_SUFFIX;
class RGWMPObj {
std::string oid;
std::string prefix;
std::string meta;
std::string upload_id;
public:
RGWMPObj() {}
RGWMPObj(const std::string& _oid, const std::string& _upload_id) {
init(_oid, _upload_id, _upload_id);
}
RGWMPObj(const std::string& _oid, std::optional<std::string> _upload_id) {
if (_upload_id) {
init(_oid, *_upload_id, *_upload_id);
} else {
from_meta(_oid);
}
}
void init(const std::string& _oid, const std::string& _upload_id) {
init(_oid, _upload_id, _upload_id);
}
void init(const std::string& _oid, const std::string& _upload_id, const std::string& part_unique_str) {
if (_oid.empty()) {
clear();
return;
}
oid = _oid;
upload_id = _upload_id;
prefix = oid + ".";
meta = prefix + upload_id + MP_META_SUFFIX;
prefix.append(part_unique_str);
}
const std::string& get_meta() const { return meta; }
std::string get_part(int num) const {
char buf[16];
snprintf(buf, 16, ".%d", num);
std::string s = prefix;
s.append(buf);
return s;
}
std::string get_part(const std::string& part) const {
std::string s = prefix;
s.append(".");
s.append(part);
return s;
}
const std::string& get_upload_id() const {
return upload_id;
}
const std::string& get_key() const {
return oid;
}
bool from_meta(const std::string& meta) {
int end_pos = meta.rfind('.'); // search for ".meta"
if (end_pos < 0)
return false;
int mid_pos = meta.rfind('.', end_pos - 1); // <key>.<upload_id>
if (mid_pos < 0)
return false;
oid = meta.substr(0, mid_pos);
upload_id = meta.substr(mid_pos + 1, end_pos - mid_pos - 1);
init(oid, upload_id, upload_id);
return true;
}
void clear() {
oid = "";
prefix = "";
meta = "";
upload_id = "";
}
friend std::ostream& operator<<(std::ostream& out, const RGWMPObj& obj) {
return out << "RGWMPObj:{ prefix=" << std::quoted(obj.prefix) <<
", meta=" << std::quoted(obj.meta) << " }";
}
}; // class RGWMPObj
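/*
 * Illustrative usage sketch (editor's addition, not upstream code). The oid
 * and upload id below are made-up example values; MP_META_SUFFIX is the
 * extern constant declared above.
 *
 *   RGWMPObj mp;
 *   mp.init("myobj", "uploadid123");     // prefix becomes "myobj.uploadid123"
 *   std::string meta = mp.get_meta();    // "myobj.uploadid123" + MP_META_SUFFIX
 *   std::string part = mp.get_part(3);   // "<prefix>.3"
 *
 *   RGWMPObj parsed;
 *   parsed.from_meta(meta);              // recovers get_key() and get_upload_id()
 */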
/**
* A filter to a) test whether an object name is a multipart meta
 * object, and b) extract just the key used to determine the bucket
* index shard.
*
* Objects for multipart meta have names adorned with an upload id and
* other elements -- specifically a ".", MULTIPART_UPLOAD_ID_PREFIX,
* unique id, and MP_META_SUFFIX. This filter will return true when
* the name provided is such. It will also extract the key used for
* bucket index shard calculation from the adorned name.
*/
class MultipartMetaFilter : public RGWAccessListFilter {
public:
MultipartMetaFilter() {}
virtual ~MultipartMetaFilter() override;
/**
* @param name [in] The object name as it appears in the bucket index.
* @param key [out] An output parameter that will contain the bucket
* index key if this entry is in the form of a multipart meta object.
* @return true if the name provided is in the form of a multipart meta
* object, false otherwise
*/
bool filter(const std::string& name, std::string& key) override;
};
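/*
 * Illustrative usage sketch (editor's addition): applying the filter to a raw
 * bucket-index name. `name` stands for whatever a listing produced.
 *
 *   MultipartMetaFilter mp_filter;
 *   std::string shard_key;
 *   if (mp_filter.filter(name, shard_key)) {
 *     // name is a multipart meta object; shard_key now holds the key to use
 *     // for bucket index shard calculation
 *   }
 */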
class RGWSI_Tier_RADOS : public RGWServiceInstance
{
RGWSI_Zone *zone_svc{nullptr};
public:
RGWSI_Tier_RADOS(CephContext *cct): RGWServiceInstance(cct) {}
void init(RGWSI_Zone *_zone_svc) {
zone_svc = _zone_svc;
}
static inline bool raw_obj_to_obj(const rgw_bucket& bucket, const rgw_raw_obj& raw_obj, rgw_obj *obj) {
ssize_t pos = raw_obj.oid.find('_', bucket.marker.length());
if (pos < 0) {
return false;
}
if (!rgw_obj_key::parse_raw_oid(raw_obj.oid.substr(pos + 1), &obj->key)) {
return false;
}
obj->bucket = bucket;
return true;
}
};
| 4,259 | 26.483871 | 105 | h |
null | ceph-main/src/rgw/services/svc_user.h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "svc_meta_be.h"
#include "rgw_service.h"
class RGWUserBuckets;
class RGWGetUserStats_CB;
class RGWSI_User : public RGWServiceInstance
{
public:
RGWSI_User(CephContext *cct);
virtual ~RGWSI_User();
static std::string get_meta_key(const rgw_user& user) {
return user.to_str();
}
static rgw_user user_from_meta_key(const std::string& key) {
return rgw_user(key);
}
virtual RGWSI_MetaBackend_Handler *get_be_handler() = 0;
/* base svc_user interfaces */
virtual int read_user_info(RGWSI_MetaBackend::Context *ctx,
const rgw_user& user,
RGWUserInfo *info,
RGWObjVersionTracker * const objv_tracker,
real_time * const pmtime,
rgw_cache_entry_info * const cache_info,
std::map<std::string, bufferlist> * const pattrs,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
virtual int store_user_info(RGWSI_MetaBackend::Context *ctx,
const RGWUserInfo& info,
RGWUserInfo *old_info,
RGWObjVersionTracker *objv_tracker,
const real_time& mtime,
bool exclusive,
std::map<std::string, bufferlist> *attrs,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
virtual int remove_user_info(RGWSI_MetaBackend::Context *ctx,
const RGWUserInfo& info,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
virtual int get_user_info_by_email(RGWSI_MetaBackend::Context *ctx,
const std::string& email, RGWUserInfo *info,
RGWObjVersionTracker *objv_tracker,
real_time *pmtime,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
virtual int get_user_info_by_swift(RGWSI_MetaBackend::Context *ctx,
const std::string& swift_name,
RGWUserInfo *info, /* out */
RGWObjVersionTracker * const objv_tracker,
real_time * const pmtime,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
virtual int get_user_info_by_access_key(RGWSI_MetaBackend::Context *ctx,
const std::string& access_key,
RGWUserInfo *info,
RGWObjVersionTracker* objv_tracker,
real_time *pmtime,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
virtual int add_bucket(const DoutPrefixProvider *dpp,
const rgw_user& user,
const rgw_bucket& bucket,
ceph::real_time creation_time,
optional_yield y) = 0;
virtual int remove_bucket(const DoutPrefixProvider *dpp,
const rgw_user& user,
const rgw_bucket& _bucket, optional_yield) = 0;
virtual int list_buckets(const DoutPrefixProvider *dpp,
const rgw_user& user,
const std::string& marker,
const std::string& end_marker,
uint64_t max,
RGWUserBuckets *buckets,
bool *is_truncated,
optional_yield y) = 0;
virtual int flush_bucket_stats(const DoutPrefixProvider *dpp,
const rgw_user& user,
const RGWBucketEnt& ent, optional_yield y) = 0;
virtual int complete_flush_stats(const DoutPrefixProvider *dpp,
const rgw_user& user, optional_yield y) = 0;
virtual int reset_bucket_stats(const DoutPrefixProvider *dpp,
const rgw_user& user,
optional_yield y) = 0;
virtual int read_stats(const DoutPrefixProvider *dpp,
RGWSI_MetaBackend::Context *ctx,
const rgw_user& user, RGWStorageStats *stats,
ceph::real_time *last_stats_sync, /* last time a full stats sync completed */
ceph::real_time *last_stats_update,
optional_yield y) = 0; /* last time a stats update was done */
virtual int read_stats_async(const DoutPrefixProvider *dpp,
const rgw_user& user, RGWGetUserStats_CB *cb) = 0;
};
| 5,374 | 40.992188 | 89 | h |
null | ceph-main/src/rgw/services/svc_zone_utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "rgw_service.h"
class RGWSI_RADOS;
class RGWSI_Zone;
class RGWSI_ZoneUtils : public RGWServiceInstance
{
friend struct RGWServices_Def;
RGWSI_RADOS *rados_svc{nullptr};
RGWSI_Zone *zone_svc{nullptr};
std::string trans_id_suffix;
void init(RGWSI_RADOS *_rados_svc,
RGWSI_Zone *_zone_svc) {
rados_svc = _rados_svc;
zone_svc = _zone_svc;
}
int do_start(optional_yield, const DoutPrefixProvider *dpp) override;
void init_unique_trans_id_deps();
public:
RGWSI_ZoneUtils(CephContext *cct): RGWServiceInstance(cct) {}
std::string gen_host_id();
std::string unique_id(uint64_t unique_num);
std::string unique_trans_id(const uint64_t unique_num);
};
| 825 | 20.179487 | 71 | h |
null | ceph-main/src/test/admin_socket_output.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_ADMIN_SOCKET_OUTPUT_H
#define CEPH_ADMIN_SOCKET_OUTPUT_H
#include <filesystem>
#include <string>
#include <map>
#include <set>
#include <vector>
namespace fs = std::filesystem;
using socket_results = std::map<std::string, std::string>;
using test_functions =
std::vector<std::pair<std::string, bool (*)(std::string &)>>;
class AdminSocketClient;
class AdminSocketOutput {
public:
AdminSocketOutput() {}
void add_target(const std::string &target);
void add_command(const std::string &target, const std::string &command);
void add_test(const std::string &target, const std::string &command,
bool (*test)(std::string &));
void postpone(const std::string &target, const std::string &command);
void exec();
void mod_for_vstart(const std::string& dir) {
socketdir = dir;
prefix = "";
}
private:
bool init_directories() const {
std::cout << "Checking " << socketdir << std::endl;
return exists(socketdir) && is_directory(socketdir);
}
bool init_sockets();
bool gather_socket_output();
std::string get_result(const std::string &target, const std::string &command) const;
std::pair<std::string, std::string>
run_command(AdminSocketClient &client, const std::string &raw_command,
bool send_untouched = false);
bool run_tests() const;
std::set<std::string> targets;
std::map<std::string, std::string> sockets;
std::map<std::string, socket_results> results;
std::map<std::string, std::vector<std::string>> custom_commands;
std::map<std::string, std::vector<std::string>> postponed_commands;
std::map<std::string, test_functions> tests;
std::string prefix = "ceph-";
fs::path socketdir = "/var/run/ceph";
};
#endif // CEPH_ADMIN_SOCKET_OUTPUT_H
| 2,157 | 27.025974 | 86 | h |
null | ceph-main/src/test/admin_socket_output_tests.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_ADMIN_SOCKET_OUTPUT_TESTS_H
#define CEPH_ADMIN_SOCKET_OUTPUT_TESTS_H
// Test function declarations, definitions in admin_socket_output_tests.cc
// Example test function
/*
bool test_config_get_admin_socket(std::string& output);
*/
bool test_dump_pgstate_history(std::string& output);
#endif // CEPH_ADMIN_SOCKET_OUTPUT_TESTS_H
| 744 | 24.689655 | 74 | h |
null | ceph-main/src/test/gprof-helper.c | /* gprof-helper.c -- preload library to profile pthread-enabled programs
*
* Authors: Sam Hocevar <sam at zoy dot org>
* Daniel Jönsson <danieljo at fagotten dot org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the Do What The Fuck You Want To
* Public License as published by Banlu Kemiyatorn. See
* http://sam.zoy.org/projects/COPYING.WTFPL for more details.
*
* Compilation example:
* gcc -shared -fPIC gprof-helper.c -o gprof-helper.so -lpthread -ldl
*
* Usage example:
* LD_PRELOAD=./gprof-helper.so your_program
*/
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <pthread.h>
static void * wrapper_routine(void *);
/* Original pthread function */
static int (*pthread_create_orig)(pthread_t *__restrict,
__const pthread_attr_t *__restrict,
void *(*)(void *),
void *__restrict) = NULL;
/* Library initialization function */
void wooinit(void) __attribute__((constructor));
void wooinit(void)
{
pthread_create_orig = dlsym(RTLD_NEXT, "pthread_create");
fprintf(stderr, "pthreads: using profiling hooks for gprof\n");
if(pthread_create_orig == NULL)
{
char *error = dlerror();
if(error == NULL)
{
error = "pthread_create is NULL";
}
fprintf(stderr, "%s\n", error);
exit(EXIT_FAILURE);
}
}
/* Our data structure passed to the wrapper */
typedef struct wrapper_s
{
void * (*start_routine)(void *);
void * arg;
pthread_mutex_t lock;
pthread_cond_t wait;
struct itimerval itimer;
} wrapper_t;
/* The wrapper function in charge for setting the itimer value */
static void * wrapper_routine(void * data)
{
/* Put user data in thread-local variables */
void * (*start_routine)(void *) = ((wrapper_t*)data)->start_routine;
void * arg = ((wrapper_t*)data)->arg;
/* Set the profile timer value */
setitimer(ITIMER_PROF, &((wrapper_t*)data)->itimer, NULL);
/* Tell the calling thread that we don't need its data anymore */
pthread_mutex_lock(&((wrapper_t*)data)->lock);
pthread_cond_signal(&((wrapper_t*)data)->wait);
pthread_mutex_unlock(&((wrapper_t*)data)->lock);
/* Call the real function */
return start_routine(arg);
}
/* Our wrapper function for the real pthread_create() */
int pthread_create(pthread_t *__restrict thread,
__const pthread_attr_t *__restrict attr,
void * (*start_routine)(void *),
void *__restrict arg)
{
wrapper_t wrapper_data;
int i_return;
/* Initialize the wrapper structure */
wrapper_data.start_routine = start_routine;
wrapper_data.arg = arg;
getitimer(ITIMER_PROF, &wrapper_data.itimer);
pthread_cond_init(&wrapper_data.wait, NULL);
pthread_mutex_init(&wrapper_data.lock, NULL);
pthread_mutex_lock(&wrapper_data.lock);
/* The real pthread_create call */
i_return = pthread_create_orig(thread,
attr,
&wrapper_routine,
&wrapper_data);
/* If the thread was successfully spawned, wait for the data
* to be released */
if(i_return == 0)
{
pthread_cond_wait(&wrapper_data.wait, &wrapper_data.lock);
}
pthread_mutex_unlock(&wrapper_data.lock);
pthread_mutex_destroy(&wrapper_data.lock);
pthread_cond_destroy(&wrapper_data.wait);
return i_return;
}
| 3,604 | 29.041667 | 72 | c |
null | ceph-main/src/test/kv_store_bench.h | /*
* Benchmarking suite for key-value store
*
* September 2, 2012
* Eleanor Cawthon
* [email protected]
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef KVSTOREBENCH_H_
#define KVSTOREBENCH_H_
#include "key_value_store/key_value_structure.h"
#include "key_value_store/kv_flat_btree_async.h"
#include "common/Clock.h"
#include "global/global_context.h"
#include "common/Cond.h"
#include <string>
#include <climits>
#include <cfloat>
#include <iostream>
/**
* stores pairings from op type to time taken for that op (for latency), and to
* time that op completed to the nearest second (for throughput).
*/
struct kv_bench_data {
JSONFormatter throughput_jf;
JSONFormatter latency_jf;
};
class KvStoreBench;
/**
* keeps track of the number of milliseconds between two events - used to
* measure latency
*/
struct StopWatch {
utime_t begin_time;
utime_t end_time;
void start_time() {
begin_time = ceph_clock_now();
}
void stop_time() {
end_time = ceph_clock_now();
}
double get_time() {
return (end_time - begin_time) * 1000;
}
void clear() {
begin_time = end_time = utime_t();
}
};
/**
* arguments passed to the callback method when the op is being timed
*/
struct timed_args {
StopWatch sw;
//kv_bench_data data;
KvStoreBench * kvsb;
ceph::buffer::list val;
int err;
char op;
timed_args ()
: kvsb(NULL),
err(0),
op(' ')
{};
timed_args (KvStoreBench * k)
: kvsb(k),
err(0),
op(' ')
{}
};
typedef std::pair<std::string, ceph::buffer::list> (KvStoreBench::*next_gen_t)(bool new_elem);
class KvStoreBench {
protected:
//test setup variables set from command line
int entries; //the number of entries to write initially
int ops; //the number of operations to time
int clients; //the total number of clients running this test - used
//in the aio test to coordinate the end of the initial sets
int key_size;//number of characters in keys to write
int val_size;//number of characters in values to write
int max_ops_in_flight;
bool clear_first;//if true, remove all objects in pool before starting tests
//variables passed to KeyValueStructure
int k;
int cache_size; //number of index entries to store in cache
double cache_refresh; //cache_size / cache_refresh entries are read each time
//the index is read
std::string client_name;
bool verbose;//if true, display debug output
//internal
std::map<int, char> probs;//map of numbers from 1 to 100 to chars representing
//operation types - used to generate random operations
std::set<std::string> key_set;//set of keys already in the data set
KeyValueStructure * kvs;
kv_bench_data data;//stores throughput and latency from completed tests
ceph::mutex data_lock = ceph::make_mutex("data lock");
ceph::condition_variable op_avail; // signaled when an op completes
int ops_in_flight;//number of operations currently in progress
ceph::mutex ops_in_flight_lock =
ceph::make_mutex("KvStoreBench::ops_in_flight_lock");
//these are used for cleanup and setup purposes - they are NOT passed to kvs!
librados::Rados rados;
std::string rados_id;
std::string pool_name;
bool io_ctx_ready;
librados::IoCtx io_ctx;
/**
* Prints JSON-formatted throughput and latency data.
*
* Throughput data is {'char representing the operation type':time the op
* completed to the nearest second}
* Latency is {'char representing the operation type':time taken by the op}
*/
void print_time_data();
public:
KvStoreBench();
//after this is called, objects created by the KeyValueStructure remain.
~KvStoreBench();
/**
* parses command line arguments, sets up this rados instance, clears the
* pool if clear_first is true and calls kvs->setup.
*/
int setup(int argc, const char** argv);
/**
* Returns a string of random characters of length len
*/
std::string random_string(int len);
/**
   * Inserts `entries` random key/value pairs asynchronously.
*/
int test_random_insertions();
/**
   * Calls test_random_insertions, then performs `ops` randomly chosen
   * operations asynchronously, with at most max_ops_in_flight operations
   * in flight at a time.
*/
int test_teuthology_aio(next_gen_t distr, const std::map<int, char> &probs);
/**
   * Calls test_random_insertions, then performs `ops` randomly chosen
   * operations synchronously.
*/
int test_teuthology_sync(next_gen_t distr, const std::map<int, char> &probs);
/**
* returns a key-value pair. If new_elem is true, the key is randomly
* generated. If it is false, the key is selected from the keys currently in
* the key set.
*/
std::pair<std::string, ceph::buffer::list> rand_distr(bool new_elem);
/**
* Called when aio operations complete. Updates data.
*/
static void aio_callback_timed(int * err, void *arg);
/**
   * Calls the test_ methods. Modify this to run, for example, multiple test
   * runs with different settings. Currently it just calls test_teuthology_aio.
*/
int teuthology_tests();
};
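/*
 * Illustrative usage sketch (editor's addition): inside a main() that forwards
 * its (argc, argv), the benchmark is driven as follows.
 *
 *   KvStoreBench kvsb;
 *   int r = kvsb.setup(argc, argv);    // parse args, connect, optionally clear pool
 *   if (r == 0)
 *     r = kvsb.teuthology_tests();     // currently runs test_teuthology_aio
 *   return r;
 */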
#endif /* KVSTOREBENCH_H_ */
| 5,252 | 26.217617 | 94 | h |
null | ceph-main/src/test/omap_bench.h | /*
* Generate latency statistics for a configurable number of object map write
* operations of configurable size.
*
* Created on: May 21, 2012
* Author: Eleanor Cawthon
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef OMAP_BENCH_HPP_
#define OMAP_BENCH_HPP_
#include "common/ceph_mutex.h"
#include "common/Cond.h"
#include "include/rados/librados.hpp"
#include <string>
#include <map>
#include <cfloat>
using ceph::bufferlist;
struct o_bench_data {
double avg_latency;
double min_latency;
double max_latency;
double total_latency;
int started_ops;
int completed_ops;
std::map<int,int> freq_map;
std::pair<int,int> mode;
o_bench_data()
: avg_latency(0.0), min_latency(DBL_MAX), max_latency(0.0),
total_latency(0.0),
started_ops(0), completed_ops(0)
{}
};
class OmapBench;
typedef int (*omap_generator_t)(const int omap_entries, const int key_size,
const int value_size,
std::map<std::string,bufferlist> *out_omap);
typedef int (OmapBench::*test_t)(omap_generator_t omap_gen);
class Writer{
protected:
std::string oid;
utime_t begin_time;
utime_t end_time;
std::map<std::string,bufferlist> omap;
OmapBench *ob;
friend class OmapBench;
public:
Writer(OmapBench *omap_bench);
virtual ~Writer(){};
virtual void start_time();
virtual void stop_time();
virtual double get_time();
virtual std::string get_oid();
virtual std::map<std::string,bufferlist> & get_omap();
};
class AioWriter : public Writer{
protected:
librados::AioCompletion * aioc;
friend class OmapBench;
public:
AioWriter(OmapBench *omap_bench);
~AioWriter() override;
virtual librados::AioCompletion * get_aioc();
virtual void set_aioc(librados::callback_t complete);
};
class OmapBench{
protected:
librados::IoCtx io_ctx;
librados::Rados rados;
struct o_bench_data data;
test_t test;
omap_generator_t omap_generator;
//aio things
ceph::condition_variable thread_is_free;
ceph::mutex thread_is_free_lock =
ceph::make_mutex("OmapBench::thread_is_free_lock");
ceph::mutex data_lock =
ceph::make_mutex("OmapBench::data_lock");
int busythreads_count;
librados::callback_t comp;
std::string pool_name;
std::string rados_id;
std::string prefix;
int threads;
int objects;
int entries_per_omap;
int key_size;
int value_size;
double increment;
friend class Writer;
friend class AioWriter;
public:
OmapBench()
: test(&OmapBench::test_write_objects_in_parallel),
omap_generator(generate_uniform_omap),
busythreads_count(0),
comp(aio_is_complete),
pool_name("rbd"),
rados_id("admin"),
prefix(rados_id+".obj."),
threads(3), objects(100), entries_per_omap(10), key_size(10),
value_size(100), increment(10)
{}
/**
* Parses command line args, initializes rados and ioctx
*/
int setup(int argc, const char** argv);
/**
   * Callback invoked when an AioCompletion (launched from an AioWriter)
   * completes. Deletes the AioWriter that launched it, updates data and
   * busythreads_count, and signals thread_is_free.
*
* @param c provided by aio_write - not used
* @param arg the AioWriter that contains this AioCompletion
*/
static void aio_is_complete(rados_completion_t c, void *arg);
/**
* Generates a random string len characters long
*/
static std::string random_string(int len);
/*
* runs the test specified by test using the omap generator specified by
* omap_generator
*
* @return error code
*/
int run();
/*
* Prints all keys and values for all omap entries for all objects
*/
int print_written_omap();
/*
* Displays relevant constants and the histogram generated through a test
*/
void print_results();
/**
* Writes an object with the specified AioWriter.
*
* @param aiow the AioWriter to write with
* @param omap the omap to write
* @post: an asynchronous omap_set is launched
*/
int write_omap_asynchronously(AioWriter *aiow,
const std::map<std::string,bufferlist> &map);
/**
* Generates an omap with omap_entries entries, each with keys key_size
* characters long and with string values value_size characters long.
*
* @param out_map pointer to the map to be created
* @return error code
*/
static int generate_uniform_omap(const int omap_entries, const int key_size,
const int value_size, std::map<std::string,bufferlist> * out_omap);
/**
* The same as generate_uniform_omap except that string lengths are picked
* randomly between 1 and the int arguments
*/
static int generate_non_uniform_omap(const int omap_entries,
const int key_size,
const int value_size, std::map<std::string,bufferlist> * out_omap);
static int generate_small_non_random_omap(const int omap_entries,
const int key_size, const int value_size,
std::map<std::string,bufferlist> * out_omap);
/*
* Uses aio_write to write omaps generated by omap_gen to OBJECTS objects
* using THREADS AioWriters at a time.
*
* @param omap_gen the method used to generate the omaps.
*/
int test_write_objects_in_parallel(omap_generator_t omap_gen);
};
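/*
 * Illustrative usage sketch (editor's addition): a driver parses arguments,
 * runs the configured test and prints the latency histogram.
 *
 *   OmapBench ob;
 *   if (ob.setup(argc, argv) < 0)        // init rados and io_ctx
 *     return 1;
 *   if (ob.run() < 0)                    // runs `test` with `omap_generator`
 *     return 1;
 *   ob.print_results();                  // constants + latency histogram
 */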
#endif /* OMAP_BENCH_HPP_ */
| 5,375 | 24.971014 | 78 | h |
null | ceph-main/src/test/perf_helper.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/* Copyright (c) 2011 Facebook
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef CEPH_TEST_PERFHELPER_H
#define CEPH_TEST_PERFHELPER_H
namespace PerfHelper {
void flush_cache();
uint64_t plus_one(uint64_t x);
void throw_end_of_buffer();
void throw_int();
} // PerfHelper
#endif // CEPH_TEST_PERFHELPER_H
| 1,098 | 34.451613 | 77 | h |
null | ceph-main/src/test/ObjectMap/KeyValueDBMemory.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <map>
#include <set>
#include <string>
#include "kv/KeyValueDB.h"
#include "include/buffer.h"
#include "include/Context.h"
using std::string;
class KeyValueDBMemory : public KeyValueDB {
public:
std::map<std::pair<string,string>,bufferlist> db;
KeyValueDBMemory() { }
explicit KeyValueDBMemory(KeyValueDBMemory *db) : db(db->db) { }
~KeyValueDBMemory() override { }
int init(string _opt) override {
return 0;
}
int open(std::ostream &out, const std::string& cfs="") override {
return 0;
}
int create_and_open(std::ostream &out, const std::string& cfs="") override {
return 0;
}
int get(
const std::string &prefix,
const std::set<std::string> &key,
std::map<std::string, bufferlist> *out
) override;
using KeyValueDB::get;
int get_keys(
const std::string &prefix,
const std::set<std::string> &key,
std::set<std::string> *out
);
int set(
const std::string &prefix,
const std::string &key,
const bufferlist &bl
);
int rmkey(
const std::string &prefix,
const std::string &key
);
int rmkeys_by_prefix(
const std::string &prefix
);
int rm_range_keys(
const std::string &prefix,
const std::string &start,
const std::string &end
);
class TransactionImpl_ : public TransactionImpl {
public:
std::list<Context *> on_commit;
KeyValueDBMemory *db;
explicit TransactionImpl_(KeyValueDBMemory *db) : db(db) {}
struct SetOp : public Context {
KeyValueDBMemory *db;
std::pair<std::string,std::string> key;
bufferlist value;
SetOp(KeyValueDBMemory *db,
const std::pair<std::string,std::string> &key,
const bufferlist &value)
: db(db), key(key), value(value) {}
void finish(int r) override {
db->set(key.first, key.second, value);
}
};
void set(const std::string &prefix, const std::string &k, const bufferlist& bl) override {
on_commit.push_back(new SetOp(db, std::make_pair(prefix, k), bl));
}
struct RmKeysOp : public Context {
KeyValueDBMemory *db;
std::pair<std::string,std::string> key;
RmKeysOp(KeyValueDBMemory *db,
const std::pair<std::string,std::string> &key)
: db(db), key(key) {}
void finish(int r) override {
db->rmkey(key.first, key.second);
}
};
using KeyValueDB::TransactionImpl::rmkey;
using KeyValueDB::TransactionImpl::set;
void rmkey(const std::string &prefix, const std::string &key) override {
on_commit.push_back(new RmKeysOp(db, std::make_pair(prefix, key)));
}
struct RmKeysByPrefixOp : public Context {
KeyValueDBMemory *db;
std::string prefix;
RmKeysByPrefixOp(KeyValueDBMemory *db,
const std::string &prefix)
: db(db), prefix(prefix) {}
void finish(int r) override {
db->rmkeys_by_prefix(prefix);
}
};
void rmkeys_by_prefix(const std::string &prefix) override {
on_commit.push_back(new RmKeysByPrefixOp(db, prefix));
}
struct RmRangeKeys: public Context {
KeyValueDBMemory *db;
std::string prefix, start, end;
RmRangeKeys(KeyValueDBMemory *db, const std::string &prefix, const std::string &s, const std::string &e)
: db(db), prefix(prefix), start(s), end(e) {}
void finish(int r) {
db->rm_range_keys(prefix, start, end);
}
};
void rm_range_keys(const std::string &prefix, const std::string &start, const std::string &end) {
on_commit.push_back(new RmRangeKeys(db, prefix, start, end));
}
int complete() {
for (auto i = on_commit.begin();
i != on_commit.end();
on_commit.erase(i++)) {
(*i)->complete(0);
}
return 0;
}
~TransactionImpl_() override {
for (auto i = on_commit.begin();
i != on_commit.end();
on_commit.erase(i++)) {
delete *i;
}
}
};
Transaction get_transaction() override {
return Transaction(new TransactionImpl_(this));
}
int submit_transaction(Transaction trans) override {
return static_cast<TransactionImpl_*>(trans.get())->complete();
}
uint64_t get_estimated_size(std::map<std::string,uint64_t> &extras) override {
uint64_t total_size = 0;
for (auto& [key, bl] : db) {
string prefix = key.first;
uint64_t sz = bl.length();
total_size += sz;
if (extras.count(prefix) == 0)
extras[prefix] = 0;
extras[prefix] += sz;
}
return total_size;
}
private:
bool exists_prefix(const std::string &prefix) {
std::map<std::pair<std::string,std::string>,bufferlist>::iterator it;
it = db.lower_bound(std::make_pair(prefix, ""));
return ((it != db.end()) && ((*it).first.first == prefix));
}
friend class WholeSpaceMemIterator;
public:
WholeSpaceIterator get_wholespace_iterator(IteratorOpts opts = 0) override;
};
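/*
 * Illustrative usage sketch (editor's addition): the in-memory backend follows
 * the usual KeyValueDB transaction pattern; "prefix", "key" and the value are
 * example data.
 *
 *   KeyValueDBMemory db;
 *   KeyValueDB::Transaction t = db.get_transaction();
 *   bufferlist bl;
 *   bl.append("value");
 *   t->set("prefix", "key", bl);
 *   db.submit_transaction(t);            // applies the queued SetOp contexts
 *
 *   std::map<std::string, bufferlist> got;
 *   db.get("prefix", {"key"}, &got);
 */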
| 4,938 | 25.132275 | 110 | h |
null | ceph-main/src/test/client/TestClient.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2021 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "gtest/gtest.h"
#include "common/async/context_pool.h"
#include "global/global_context.h"
#include "msg/Messenger.h"
#include "mon/MonClient.h"
#include "osdc/ObjectCacher.h"
#include "client/MetaRequest.h"
#include "client/Client.h"
#include "messages/MClientReclaim.h"
#include "messages/MClientSession.h"
#include "common/async/blocked_completion.h"
#define dout_subsys ceph_subsys_client
namespace bs = boost::system;
namespace ca = ceph::async;
class ClientScaffold : public Client {
public:
ClientScaffold(Messenger *m, MonClient *mc, Objecter *objecter_) : Client(m, mc, objecter_) {}
virtual ~ClientScaffold()
{ }
int check_dummy_op(const UserPerm& perms){
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied()) {
return -CEPHFS_ENOTCONN;
}
std::scoped_lock l(client_lock);
MetaRequest *req = new MetaRequest(CEPH_MDS_OP_DUMMY);
int res = make_request(req, perms);
ldout(cct, 10) << __func__ << " result=" << res << dendl;
return res;
}
int send_unknown_session_op(int op) {
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied()) {
return -CEPHFS_ENOTCONN;
}
std::scoped_lock l(client_lock);
auto session = _get_or_open_mds_session(0);
auto msg = make_message<MClientSession>(op, session->seq);
int res = session->con->send_message2(std::move(msg));
ldout(cct, 10) << __func__ << " result=" << res << dendl;
return res;
}
bool check_client_blocklisted() {
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied()) {
return -CEPHFS_ENOTCONN;
}
std::scoped_lock l(client_lock);
bs::error_code ec;
ldout(cct, 20) << __func__ << ": waiting for latest osdmap" << dendl;
objecter->wait_for_latest_osdmap(ca::use_blocked[ec]);
ldout(cct, 20) << __func__ << ": got latest osdmap: " << ec << dendl;
const auto myaddrs = messenger->get_myaddrs();
return objecter->with_osdmap([&](const OSDMap& o) {return o.is_blocklisted(myaddrs);});
}
bool check_unknown_reclaim_flag(uint32_t flag) {
RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
if (!mref_reader.is_state_satisfied()) {
return -CEPHFS_ENOTCONN;
}
std::scoped_lock l(client_lock);
char uuid[256];
sprintf(uuid, "unknownreclaimflag:%x", getpid());
auto session = _get_or_open_mds_session(0);
auto m = make_message<MClientReclaim>(uuid, flag);
ceph_assert(session->con->send_message2(std::move(m)) == 0);
wait_on_list(waiting_for_reclaim);
    return session->reclaim_state == MetaSession::RECLAIM_FAIL;
}
};
class TestClient : public ::testing::Test {
public:
static void SetUpTestSuite() {
icp.start(g_ceph_context->_conf.get_val<std::uint64_t>("client_asio_thread_count"));
}
static void TearDownTestSuite() {
icp.stop();
}
void SetUp() override {
messenger = Messenger::create_client_messenger(g_ceph_context, "client");
if (messenger->start() != 0) {
throw std::runtime_error("failed to start messenger");
}
mc = new MonClient(g_ceph_context, icp);
if (mc->build_initial_monmap() < 0) {
throw std::runtime_error("build monmap");
}
mc->set_messenger(messenger);
mc->set_want_keys(CEPH_ENTITY_TYPE_MDS | CEPH_ENTITY_TYPE_OSD);
if (mc->init() < 0) {
throw std::runtime_error("init monclient");
}
objecter = new Objecter(g_ceph_context, messenger, mc, icp);
objecter->set_client_incarnation(0);
objecter->init();
messenger->add_dispatcher_tail(objecter);
objecter->start();
client = new ClientScaffold(messenger, mc, objecter);
client->init();
client->mount("/", myperm, true);
}
void TearDown() override {
if (client->is_mounted())
client->unmount();
client->shutdown();
objecter->shutdown();
mc->shutdown();
messenger->shutdown();
messenger->wait();
delete client;
client = nullptr;
delete objecter;
objecter = nullptr;
delete mc;
mc = nullptr;
delete messenger;
messenger = nullptr;
}
protected:
static inline ceph::async::io_context_pool icp;
static inline UserPerm myperm{0,0};
MonClient* mc = nullptr;
Messenger* messenger = nullptr;
Objecter* objecter = nullptr;
ClientScaffold* client = nullptr;
};
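/*
 * Illustrative usage sketch (editor's addition): tests derive from the
 * TestClient fixture and drive the mounted ClientScaffold directly, e.g.
 *
 *   TEST_F(TestClient, DummyOp) {
 *     ASSERT_EQ(0, client->check_dummy_op(myperm));
 *   }
 *
 * The expected value is illustrative; what the MDS actually returns for the
 * dummy op depends on the cluster the test runs against.
 */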
| 4,990 | 32.05298 | 98 | h |
null | ceph-main/src/test/common/ObjectContents.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
#include "include/interval_set.h"
#include "include/buffer_fwd.h"
#include <map>
#ifndef COMMON_OBJECT_H
#define COMMON_OBJECT_H
enum {
RANDOMWRITEFULL,
DELETED,
CLONERANGE
};
bool test_object_contents();
class ObjectContents {
uint64_t _size;
std::map<uint64_t, unsigned int> seeds;
interval_set<uint64_t> written;
bool _exists;
public:
class Iterator {
ObjectContents *parent;
std::map<uint64_t, unsigned int>::iterator iter;
unsigned int current_state;
int current_val;
uint64_t pos;
private:
unsigned int get_state(uint64_t pos);
public:
explicit Iterator(ObjectContents *parent) :
parent(parent), iter(parent->seeds.end()),
current_state(0), current_val(0), pos(-1) {
seek_to_first();
}
char operator*() {
return parent->written.contains(pos) ?
static_cast<char>(current_val % 256) : '\0';
}
uint64_t get_pos() {
return pos;
}
void seek_to(uint64_t _pos) {
if (pos > _pos ||
(iter != parent->seeds.end() && _pos >= iter->first)) {
iter = parent->seeds.upper_bound(_pos);
--iter;
current_state = iter->second;
current_val = rand_r(¤t_state);
pos = iter->first;
++iter;
}
while (pos < _pos) ++(*this);
}
void seek_to_first() {
seek_to(0);
}
Iterator &operator++() {
++pos;
if (iter != parent->seeds.end() && pos >= iter->first) {
ceph_assert(pos == iter->first);
current_state = iter->second;
++iter;
}
current_val = rand_r(¤t_state);
return *this;
}
bool valid() {
return pos < parent->size();
}
friend class ObjectContents;
};
ObjectContents() : _size(0), _exists(false) {
seeds[0] = 0;
}
explicit ObjectContents(bufferlist::const_iterator &bp) {
decode(_size, bp);
decode(seeds, bp);
decode(written, bp);
decode(_exists, bp);
}
void clone_range(ObjectContents &other,
interval_set<uint64_t> &intervals);
void write(unsigned int seed,
uint64_t from,
uint64_t len);
Iterator get_iterator() {
return Iterator(this);
}
uint64_t size() const { return _size; }
bool exists() { return _exists; }
void debug(std::ostream &out) {
out << "_size is " << _size << std::endl;
out << "seeds is: (";
for (std::map<uint64_t, unsigned int>::iterator i = seeds.begin();
i != seeds.end();
++i) {
out << "[" << i->first << "," << i->second << "], ";
}
out << ")" << std::endl;
out << "written is " << written << std::endl;
out << "_exists is " << _exists << std::endl;
}
void encode(bufferlist &bl) const {
using ceph::encode;
encode(_size, bl);
encode(seeds, bl);
encode(written, bl);
encode(_exists, bl);
}
};
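/*
 * Illustrative usage sketch (editor's addition): contents are defined by
 * seeded writes and read back through the iterator; the seed, offset and
 * length below are arbitrary example values.
 *
 *   ObjectContents contents;
 *   contents.write(12345, 0, 4096);          // seed, offset, length
 *   for (auto iter = contents.get_iterator(); iter.valid(); ++iter) {
 *     char expected = *iter;                 // byte expected at iter.get_pos()
 *     // compare against the byte actually stored in the object
 *   }
 */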
#endif
| 2,837 | 22.073171 | 70 | h |
null | ceph-main/src/test/common/dns_messages.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_TEST_DNS_MESSAGES_H
#define CEPH_TEST_DNS_MESSAGES_H
#include "common/dns_resolve.h"
#include "gmock/gmock.h"
u_char ns_search_msg_ok_payload[] = {
0x00, 0x55, 0x85, 0x80, 0x00, 0x01, 0x00, 0x03, 0x00, 0x02, 0x00, 0x05, 0x09,
0x5F, 0x63, 0x65, 0x70, 0x68, 0x2D, 0x6D, 0x6F, 0x6E, 0x04, 0x5F, 0x74, 0x63,
0x70, 0x04, 0x63, 0x65, 0x70, 0x68, 0x03, 0x63, 0x6F, 0x6D, 0x00, 0x00, 0x21,
0x00, 0x01, 0xC0, 0x0C, 0x00, 0x21, 0x00, 0x01, 0x00, 0x09, 0x3A, 0x80, 0x00,
0x16, 0x00, 0x0A, 0x00, 0x28, 0x1A, 0x85, 0x03, 0x6D, 0x6F, 0x6E, 0x01, 0x61,
0x04, 0x63, 0x65, 0x70, 0x68, 0x03, 0x63, 0x6F, 0x6D, 0x00, 0xC0, 0x0C, 0x00,
0x21, 0x00, 0x01, 0x00, 0x09, 0x3A, 0x80, 0x00, 0x16, 0x00, 0x0A, 0x00, 0x19,
0x1A, 0x85, 0x03, 0x6D, 0x6F, 0x6E, 0x01, 0x63, 0x04, 0x63, 0x65, 0x70, 0x68,
0x03, 0x63, 0x6F, 0x6D, 0x00, 0xC0, 0x0C, 0x00, 0x21, 0x00, 0x01, 0x00, 0x09,
0x3A, 0x80, 0x00, 0x16, 0x00, 0x0A, 0x00, 0x23, 0x1A, 0x85, 0x03, 0x6D, 0x6F,
0x6E, 0x01, 0x62, 0x04, 0x63, 0x65, 0x70, 0x68, 0x03, 0x63, 0x6F, 0x6D, 0x00,
0xC0, 0x85, 0x00, 0x02, 0x00, 0x01, 0x00, 0x09, 0x3A, 0x80, 0x00, 0x06, 0x03,
0x6E, 0x73, 0x32, 0xC0, 0x85, 0xC0, 0x85, 0x00, 0x02, 0x00, 0x01, 0x00, 0x09,
0x3A, 0x80, 0x00, 0x06, 0x03, 0x6E, 0x73, 0x31, 0xC0, 0x85, 0xC0, 0x5D, 0x00,
0x01, 0x00, 0x01, 0x00, 0x09, 0x3A, 0x80, 0x00, 0x04, 0xC0, 0xA8, 0x01, 0x0D,
0xC0, 0x7F, 0x00, 0x01, 0x00, 0x01, 0x00, 0x09, 0x3A, 0x80, 0x00, 0x04, 0xC0,
0xA8, 0x01, 0x0C, 0xC0, 0x3B, 0x00, 0x01, 0x00, 0x01, 0x00, 0x09, 0x3A, 0x80,
0x00, 0x04, 0xC0, 0xA8, 0x01, 0x0B, 0xC0, 0xAD, 0x00, 0x01, 0x00, 0x01, 0x00,
0x09, 0x3A, 0x80, 0x00, 0x04, 0xC0, 0xA8, 0x01, 0x59, 0xC0, 0x9B, 0x00, 0x01,
0x00, 0x01, 0x00, 0x09, 0x3A, 0x80, 0x00, 0x04, 0xC0, 0xA8, 0x01, 0xFE
};
u_char ns_query_msg_mon_c_payload[] = {
0x46, 0x4D, 0x85, 0x80, 0x00, 0x01, 0x00, 0x01, 0x00, 0x02, 0x00, 0x02, 0x03,
0x6D, 0x6F, 0x6E, 0x01, 0x63, 0x04, 0x63, 0x65, 0x70, 0x68, 0x03, 0x63, 0x6F,
0x6D, 0x00, 0x00, 0x01, 0x00, 0x01, 0xC0, 0x0C, 0x00, 0x01, 0x00, 0x01, 0x00,
0x09, 0x3A, 0x80, 0x00, 0x04, 0xC0, 0xA8, 0x01, 0x0D, 0xC0, 0x12, 0x00, 0x02,
0x00, 0x01, 0x00, 0x09, 0x3A, 0x80, 0x00, 0x06, 0x03, 0x6E, 0x73, 0x31, 0xC0,
0x12, 0xC0, 0x12, 0x00, 0x02, 0x00, 0x01, 0x00, 0x09, 0x3A, 0x80, 0x00, 0x06,
0x03, 0x6E, 0x73, 0x32, 0xC0, 0x12, 0xC0, 0x3C, 0x00, 0x01, 0x00, 0x01, 0x00,
0x09, 0x3A, 0x80, 0x00, 0x04, 0xC0, 0xA8, 0x01, 0x59, 0xC0, 0x4E, 0x00, 0x01,
0x00, 0x01, 0x00, 0x09, 0x3A, 0x80, 0x00, 0x04, 0xC0, 0xA8, 0x01, 0xFE
};
u_char ns_query_msg_mon_b_payload[] = {
0x64, 0xCC, 0x85, 0x80, 0x00, 0x01, 0x00, 0x01, 0x00, 0x02, 0x00, 0x02, 0x03,
0x6D, 0x6F, 0x6E, 0x01, 0x62, 0x04, 0x63, 0x65, 0x70, 0x68, 0x03, 0x63, 0x6F,
0x6D, 0x00, 0x00, 0x01, 0x00, 0x01, 0xC0, 0x0C, 0x00, 0x01, 0x00, 0x01, 0x00,
0x09, 0x3A, 0x80, 0x00, 0x04, 0xC0, 0xA8, 0x01, 0x0C, 0xC0, 0x12, 0x00, 0x02,
0x00, 0x01, 0x00, 0x09, 0x3A, 0x80, 0x00, 0x06, 0x03, 0x6E, 0x73, 0x32, 0xC0,
0x12, 0xC0, 0x12, 0x00, 0x02, 0x00, 0x01, 0x00, 0x09, 0x3A, 0x80, 0x00, 0x06,
0x03, 0x6E, 0x73, 0x31, 0xC0, 0x12, 0xC0, 0x4E, 0x00, 0x01, 0x00, 0x01, 0x00,
0x09, 0x3A, 0x80, 0x00, 0x04, 0xC0, 0xA8, 0x01, 0x59, 0xC0, 0x3C, 0x00, 0x01,
0x00, 0x01, 0x00, 0x09, 0x3A, 0x80, 0x00, 0x04, 0xC0, 0xA8, 0x01, 0xFE
};
u_char ns_query_msg_mon_a_payload[] = {
0x86, 0xAD, 0x85, 0x80, 0x00, 0x01, 0x00, 0x01, 0x00, 0x02, 0x00, 0x02, 0x03,
0x6D, 0x6F, 0x6E, 0x01, 0x61, 0x04, 0x63, 0x65, 0x70, 0x68, 0x03, 0x63, 0x6F,
0x6D, 0x00, 0x00, 0x01, 0x00, 0x01, 0xC0, 0x0C, 0x00, 0x01, 0x00, 0x01, 0x00,
0x09, 0x3A, 0x80, 0x00, 0x04, 0xC0, 0xA8, 0x01, 0x0B, 0xC0, 0x12, 0x00, 0x02,
0x00, 0x01, 0x00, 0x09, 0x3A, 0x80, 0x00, 0x06, 0x03, 0x6E, 0x73, 0x32, 0xC0,
0x12, 0xC0, 0x12, 0x00, 0x02, 0x00, 0x01, 0x00, 0x09, 0x3A, 0x80, 0x00, 0x06,
0x03, 0x6E, 0x73, 0x31, 0xC0, 0x12, 0xC0, 0x4E, 0x00, 0x01, 0x00, 0x01, 0x00,
0x09, 0x3A, 0x80, 0x00, 0x04, 0xC0, 0xA8, 0x01, 0x59, 0xC0, 0x3C, 0x00, 0x01,
0x00, 0x01, 0x00, 0x09, 0x3A, 0x80, 0x00, 0x04, 0xC0, 0xA8, 0x01, 0xFE
};
class MockResolvHWrapper : public ResolvHWrapper {
public:
#ifdef HAVE_RES_NQUERY
MOCK_METHOD6(res_nquery, int(res_state s, const char *hostname, int cls,
int type, u_char *buf, int bufsz));
MOCK_METHOD6(res_nsearch, int(res_state s, const char *hostname, int cls,
int type, u_char *buf, int bufsz));
#else
MOCK_METHOD5(res_query, int(const char *hostname, int cls,
int type, u_char *buf, int bufsz));
MOCK_METHOD5(res_search, int(const char *hostname, int cls,
int type, u_char *buf, int bufsz));
#endif
};
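/*
 * Illustrative usage sketch (editor's addition), assuming a build where the
 * res_search() mock is available: the mock is primed with one of the canned
 * payloads above before the resolver code under test is exercised.
 *
 *   using ::testing::_;
 *   using ::testing::Return;
 *
 *   MockResolvHWrapper *mock = new MockResolvHWrapper();
 *   EXPECT_CALL(*mock, res_search(_, _, _, _, _))
 *       .WillOnce(Return(sizeof(ns_search_msg_ok_payload)));
 *
 * A realistic expectation would also copy ns_search_msg_ok_payload into the
 * answer buffer (e.g. with ::testing::SetArrayArgument) before returning.
 */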
#endif
| 5,042 | 48.930693 | 80 | h |
null | ceph-main/src/test/crimson/gtest_seastar.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "gtest/gtest.h"
#include "seastar_runner.h"
struct seastar_test_suite_t : public ::testing::Test {
static SeastarRunner seastar_env;
template <typename Func>
void run(Func &&func) {
return seastar_env.run(std::forward<Func>(func));
}
template <typename Func>
void run_async(Func &&func) {
run(
[func=std::forward<Func>(func)]() mutable {
return seastar::async(std::forward<Func>(func));
});
}
virtual seastar::future<> set_up_fut() { return seastar::now(); }
void SetUp() final {
return run([this] { return set_up_fut(); });
}
virtual seastar::future<> tear_down_fut() { return seastar::now(); }
void TearDown() final {
return run([this] { return tear_down_fut(); });
}
};
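/*
 * Illustrative usage sketch (editor's addition): suites derive from
 * seastar_test_suite_t and wrap seastar work in run()/run_async().
 * `my_suite_t` is a made-up name.
 *
 *   struct my_suite_t : public seastar_test_suite_t {};
 *
 *   TEST_F(my_suite_t, example) {
 *     run_async([] {
 *       // seastar::async context: blocking-style calls are allowed here
 *     });
 *   }
 *
 * Suites that need per-test setup override set_up_fut() / tear_down_fut().
 */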
| 856 | 22.805556 | 70 | h |
null | ceph-main/src/test/crimson/seastar_runner.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <stdio.h>
#include <signal.h>
#include <thread>
#include <seastar/core/app-template.hh>
#include <seastar/core/future-util.hh>
#include <seastar/core/reactor.hh>
#include <seastar/core/alien.hh>
#include <seastar/core/thread.hh>
struct SeastarRunner {
static constexpr eventfd_t APP_RUNNING = 1;
static constexpr eventfd_t APP_NOT_RUN = 2;
seastar::app_template app;
seastar::file_desc begin_fd;
std::unique_ptr<seastar::readable_eventfd> on_end;
std::thread thread;
bool begin_signaled = false;
SeastarRunner() :
begin_fd{seastar::file_desc::eventfd(0, 0)} {}
~SeastarRunner() {}
bool is_running() const {
return !!on_end;
}
int init(int argc, char **argv)
{
thread = std::thread([argc, argv, this] { reactor(argc, argv); });
eventfd_t result;
if (int r = ::eventfd_read(begin_fd.get(), &result); r < 0) {
std::cerr << "unable to eventfd_read():" << errno << std::endl;
return r;
}
assert(begin_signaled == true);
if (result == APP_RUNNING) {
assert(is_running());
return 0;
} else {
assert(result == APP_NOT_RUN);
assert(!is_running());
return 1;
}
}
void stop()
{
if (is_running()) {
run([this] {
on_end->write_side().signal(1);
return seastar::now();
});
}
thread.join();
}
void reactor(int argc, char **argv)
{
auto ret = app.run(argc, argv, [this] {
on_end.reset(new seastar::readable_eventfd);
return seastar::now().then([this] {
begin_signaled = true;
[[maybe_unused]] auto r = ::eventfd_write(begin_fd.get(), APP_RUNNING);
assert(r == 0);
return seastar::now();
}).then([this] {
return on_end->wait().then([](size_t){});
}).handle_exception([](auto ep) {
std::cerr << "Error: " << ep << std::endl;
}).finally([this] {
on_end.reset();
});
});
if (ret != 0) {
std::cerr << "Seastar app returns " << ret << std::endl;
}
if (!begin_signaled) {
begin_signaled = true;
::eventfd_write(begin_fd.get(), APP_NOT_RUN);
}
}
template <typename Func>
void run(Func &&func) {
assert(is_running());
auto fut = seastar::alien::submit_to(app.alien(), 0,
std::forward<Func>(func));
fut.get();
}
};
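/*
 * Illustrative lifecycle sketch (editor's addition): the runner is initialized
 * once with the command line, used to submit work to the reactor thread, and
 * stopped before process exit.
 *
 *   SeastarRunner env;
 *   if (env.init(argc, argv) != 0)           // spawns the reactor thread
 *     return 1;
 *   env.run([] { return seastar::now(); });  // submit work and wait for it
 *   env.stop();                              // signal on_end and join
 */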
| 2,398 | 22.291262 | 72 | h |
null | ceph-main/src/test/crimson/test_messenger.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "msg/msg_types.h"
namespace ceph::net::test {
constexpr uint64_t CMD_CLI_NONCE = 1;
constexpr int64_t CMD_CLI_OSD = 1;
constexpr uint64_t TEST_NONCE = 2;
constexpr int64_t TEST_OSD = 2;
constexpr uint64_t CMD_SRV_NONCE = 3;
constexpr int64_t CMD_SRV_OSD = 3;
constexpr uint64_t TEST_PEER_NONCE = 2;
constexpr int64_t TEST_PEER_OSD = 4;
inline entity_addr_t get_test_peer_addr(
const entity_addr_t &cmd_peer_addr) {
entity_addr_t test_peer_addr = cmd_peer_addr;
test_peer_addr.set_port(cmd_peer_addr.get_port() + 1);
test_peer_addr.set_nonce(TEST_PEER_NONCE);
return test_peer_addr;
}
enum class cmd_t : char {
none = '\0',
shutdown,
suite_start,
suite_stop,
suite_connect_me,
suite_send_me,
suite_keepalive_me,
suite_markdown,
suite_recv_op
};
enum class policy_t : char {
none = '\0',
stateful_server,
stateless_server,
lossless_peer,
lossless_peer_reuse,
lossy_client,
lossless_client
};
inline std::ostream& operator<<(std::ostream& out, const cmd_t& cmd) {
switch(cmd) {
case cmd_t::none:
return out << "none";
case cmd_t::shutdown:
return out << "shutdown";
case cmd_t::suite_start:
return out << "suite_start";
case cmd_t::suite_stop:
return out << "suite_stop";
case cmd_t::suite_connect_me:
return out << "suite_connect_me";
case cmd_t::suite_send_me:
return out << "suite_send_me";
case cmd_t::suite_keepalive_me:
return out << "suite_keepalive_me";
case cmd_t::suite_markdown:
return out << "suite_markdown";
case cmd_t::suite_recv_op:
return out << "suite_recv_op";
default:
ceph_abort();
}
}
inline std::ostream& operator<<(std::ostream& out, const policy_t& policy) {
switch(policy) {
case policy_t::none:
return out << "none";
case policy_t::stateful_server:
return out << "stateful_server";
case policy_t::stateless_server:
return out << "stateless_server";
case policy_t::lossless_peer:
return out << "lossless_peer";
case policy_t::lossless_peer_reuse:
return out << "lossless_peer_reuse";
case policy_t::lossy_client:
return out << "lossy_client";
case policy_t::lossless_client:
return out << "lossless_client";
default:
ceph_abort();
}
}
} // namespace ceph::net::test
| 2,397 | 23.979167 | 76 | h |
null | ceph-main/src/test/crimson/seastore/test_block.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <random>
#include "crimson/os/seastore/transaction_manager.h"
namespace crimson::os::seastore {
struct test_extent_desc_t {
size_t len = 0;
unsigned checksum = 0;
bool operator==(const test_extent_desc_t &rhs) const {
return (len == rhs.len &&
checksum == rhs.checksum);
}
bool operator!=(const test_extent_desc_t &rhs) const {
return !(*this == rhs);
}
};
struct test_block_delta_t {
int8_t val = 0;
uint16_t offset = 0;
uint16_t len = 0;
DENC(test_block_delta_t, v, p) {
DENC_START(1, 1, p);
denc(v.val, p);
denc(v.offset, p);
denc(v.len, p);
DENC_FINISH(p);
}
};
inline std::ostream &operator<<(
std::ostream &lhs, const test_extent_desc_t &rhs) {
return lhs << "test_extent_desc_t(len=" << rhs.len
<< ", checksum=" << rhs.checksum << ")";
}
struct TestBlock : crimson::os::seastore::LogicalCachedExtent {
constexpr static extent_len_t SIZE = 4<<10;
using Ref = TCachedExtentRef<TestBlock>;
std::vector<test_block_delta_t> delta = {};
TestBlock(ceph::bufferptr &&ptr)
: LogicalCachedExtent(std::move(ptr)) {}
TestBlock(const TestBlock &other)
: LogicalCachedExtent(other) {}
CachedExtentRef duplicate_for_write(Transaction&) final {
return CachedExtentRef(new TestBlock(*this));
};
static constexpr extent_types_t TYPE = extent_types_t::TEST_BLOCK;
extent_types_t get_type() const final {
return TYPE;
}
ceph::bufferlist get_delta() final;
void set_contents(char c, uint16_t offset, uint16_t len) {
::memset(get_bptr().c_str() + offset, c, len);
delta.push_back({c, offset, len});
}
void set_contents(char c) {
set_contents(c, 0, get_length());
}
test_extent_desc_t get_desc() {
return { get_length(), get_crc32c() };
}
void apply_delta(const ceph::bufferlist &bl) final;
};
using TestBlockRef = TCachedExtentRef<TestBlock>;
struct TestBlockPhysical : crimson::os::seastore::CachedExtent{
constexpr static extent_len_t SIZE = 4<<10;
using Ref = TCachedExtentRef<TestBlockPhysical>;
std::vector<test_block_delta_t> delta = {};
TestBlockPhysical(ceph::bufferptr &&ptr)
: CachedExtent(std::move(ptr)) {}
TestBlockPhysical(const TestBlockPhysical &other)
: CachedExtent(other) {}
CachedExtentRef duplicate_for_write(Transaction&) final {
return CachedExtentRef(new TestBlockPhysical(*this));
};
static constexpr extent_types_t TYPE = extent_types_t::TEST_BLOCK_PHYSICAL;
extent_types_t get_type() const final {
return TYPE;
}
void set_contents(char c, uint16_t offset, uint16_t len) {
::memset(get_bptr().c_str() + offset, c, len);
delta.push_back({c, offset, len});
}
void set_contents(char c) {
set_contents(c, 0, get_length());
}
ceph::bufferlist get_delta() final;
void apply_delta_and_adjust_crc(paddr_t, const ceph::bufferlist &bl) final;
};
using TestBlockPhysicalRef = TCachedExtentRef<TestBlockPhysical>;
struct test_block_mutator_t {
std::uniform_int_distribution<int8_t>
contents_distribution = std::uniform_int_distribution<int8_t>(
std::numeric_limits<int8_t>::min(),
std::numeric_limits<int8_t>::max());
std::uniform_int_distribution<uint16_t>
offset_distribution = std::uniform_int_distribution<uint16_t>(
0, TestBlock::SIZE - 1);
std::uniform_int_distribution<uint16_t> length_distribution(uint16_t offset) {
return std::uniform_int_distribution<uint16_t>(
0, TestBlock::SIZE - offset - 1);
}
template <typename generator_t>
void mutate(TestBlock &block, generator_t &gen) {
auto offset = offset_distribution(gen);
block.set_contents(
contents_distribution(gen),
offset,
length_distribution(offset)(gen));
}
};
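/*
 * Illustrative usage sketch (editor's addition): given a TestBlockRef `block`
 * obtained from the transaction manager in a test, the mutator applies one
 * random single-range overwrite; `seed` is whatever the test chooses.
 *
 *   std::mt19937 gen(seed);
 *   test_block_mutator_t mutator;
 *   mutator.mutate(*block, gen);       // records the change in block->delta
 */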
}
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::test_block_delta_t)
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::test_extent_desc_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::TestBlock> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::TestBlockPhysical> : fmt::ostream_formatter {};
#endif
| 4,252 | 26.43871 | 105 | h |
null | ceph-main/src/test/crimson/seastore/transaction_manager_test_state.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <random>
#include <boost/iterator/counting_iterator.hpp>
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/extent_placement_manager.h"
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/segment_manager/ephemeral.h"
#include "crimson/os/seastore/seastore.h"
#include "crimson/os/seastore/segment_manager.h"
#include "crimson/os/seastore/collection_manager/flat_collection_manager.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/fltree_onode_manager.h"
#include "crimson/os/seastore/random_block_manager/rbm_device.h"
#include "crimson/os/seastore/journal/circular_bounded_journal.h"
#include "crimson/os/seastore/random_block_manager/block_rb_manager.h"
using namespace crimson;
using namespace crimson::os;
using namespace crimson::os::seastore;
class EphemeralDevices {
public:
virtual seastar::future<> setup() = 0;
virtual void remount() = 0;
virtual std::size_t get_num_devices() const = 0;
virtual void reset() = 0;
virtual std::vector<Device*> get_secondary_devices() = 0;
virtual ~EphemeralDevices() {}
virtual Device* get_primary_device() = 0;
virtual DeviceRef get_primary_device_ref() = 0;
virtual void set_primary_device_ref(DeviceRef) = 0;
};
using EphemeralDevicesRef = std::unique_ptr<EphemeralDevices>;
class EphemeralSegmentedDevices : public EphemeralDevices {
segment_manager::EphemeralSegmentManagerRef segment_manager;
std::list<segment_manager::EphemeralSegmentManagerRef> secondary_segment_managers;
std::size_t num_main_device_managers;
std::size_t num_cold_device_managers;
public:
EphemeralSegmentedDevices(std::size_t num_main_devices,
std::size_t num_cold_devices)
: num_main_device_managers(num_main_devices),
num_cold_device_managers(num_cold_devices)
{
auto num_device_managers = num_main_device_managers + num_cold_device_managers;
assert(num_device_managers > 0);
secondary_segment_managers.resize(num_device_managers - 1);
}
seastar::future<> setup() final {
segment_manager = segment_manager::create_test_ephemeral();
for (auto &sec_sm : secondary_segment_managers) {
sec_sm = segment_manager::create_test_ephemeral();
}
return segment_manager->init(
).safe_then([this] {
return crimson::do_for_each(
secondary_segment_managers.begin(),
secondary_segment_managers.end(),
[](auto &sec_sm)
{
return sec_sm->init();
});
}).safe_then([this] {
return segment_manager->mkfs(
segment_manager::get_ephemeral_device_config(
0, num_main_device_managers, num_cold_device_managers));
}).safe_then([this] {
return seastar::do_with(std::size_t(0), [this](auto &cnt) {
return crimson::do_for_each(
secondary_segment_managers.begin(),
secondary_segment_managers.end(),
[this, &cnt](auto &sec_sm)
{
++cnt;
return sec_sm->mkfs(
segment_manager::get_ephemeral_device_config(
cnt, num_main_device_managers, num_cold_device_managers));
});
});
}).handle_error(
crimson::ct_error::assert_all{}
);
}
void remount() final {
segment_manager->remount();
for (auto &sec_sm : secondary_segment_managers) {
sec_sm->remount();
}
}
std::size_t get_num_devices() const final {
return secondary_segment_managers.size() + 1;
}
void reset() final {
segment_manager.reset();
for (auto &sec_sm : secondary_segment_managers) {
sec_sm.reset();
}
}
std::vector<Device*> get_secondary_devices() final {
std::vector<Device*> sec_devices;
for (auto &sec_sm : secondary_segment_managers) {
sec_devices.emplace_back(sec_sm.get());
}
return sec_devices;
}
Device* get_primary_device() final {
return segment_manager.get();
}
DeviceRef get_primary_device_ref() final;
void set_primary_device_ref(DeviceRef) final;
};
class EphemeralRandomBlockDevices : public EphemeralDevices {
random_block_device::RBMDeviceRef rb_device;
std::list<random_block_device::RBMDeviceRef> secondary_rb_devices;
public:
EphemeralRandomBlockDevices(std::size_t num_device_managers) {
assert(num_device_managers > 0);
secondary_rb_devices.resize(num_device_managers - 1);
}
seastar::future<> setup() final {
rb_device = random_block_device::create_test_ephemeral();
device_config_t config = get_rbm_ephemeral_device_config(0, 1);
return rb_device->mkfs(config).handle_error(crimson::ct_error::assert_all{});
}
void remount() final {}
std::size_t get_num_devices() const final {
return secondary_rb_devices.size() + 1;
}
void reset() final {
rb_device.reset();
for (auto &sec_rb : secondary_rb_devices) {
sec_rb.reset();
}
}
std::vector<Device*> get_secondary_devices() final {
std::vector<Device*> sec_devices;
for (auto &sec_rb : secondary_rb_devices) {
sec_devices.emplace_back(sec_rb.get());
}
return sec_devices;
}
Device* get_primary_device() final {
return rb_device.get();
}
DeviceRef get_primary_device_ref() final;
void set_primary_device_ref(DeviceRef) final;
};
class EphemeralTestState {
protected:
journal_type_t journal_type;
size_t num_main_device_managers = 0;
size_t num_cold_device_managers = 0;
EphemeralDevicesRef devices;
bool secondary_is_cold;
EphemeralTestState(std::size_t num_main_device_managers,
std::size_t num_cold_device_managers) :
num_main_device_managers(num_main_device_managers),
num_cold_device_managers(num_cold_device_managers) {}
virtual seastar::future<> _init() = 0;
virtual seastar::future<> _destroy() = 0;
virtual seastar::future<> _teardown() = 0;
seastar::future<> teardown() {
return _teardown().then([this] {
return _destroy();
});
}
virtual FuturizedStore::mkfs_ertr::future<> _mkfs() = 0;
virtual FuturizedStore::mount_ertr::future<> _mount() = 0;
seastar::future<> restart_fut() {
LOG_PREFIX(EphemeralTestState::restart_fut);
SUBINFO(test, "begin ...");
return teardown().then([this] {
devices->remount();
return _init().then([this] {
return _mount().handle_error(crimson::ct_error::assert_all{});
});
}).then([FNAME] {
SUBINFO(test, "finish");
});
}
void restart() {
restart_fut().get0();
}
seastar::future<> tm_setup(
journal_type_t type = journal_type_t::SEGMENTED) {
LOG_PREFIX(EphemeralTestState::tm_setup);
journal_type = type;
if (journal_type == journal_type_t::SEGMENTED) {
devices.reset(new
EphemeralSegmentedDevices(
num_main_device_managers, num_cold_device_managers));
} else {
assert(journal_type == journal_type_t::RANDOM_BLOCK);
//TODO: multiple devices
ceph_assert(num_main_device_managers == 1);
ceph_assert(num_cold_device_managers == 0);
devices.reset(new EphemeralRandomBlockDevices(1));
}
SUBINFO(test, "begin with {} devices ...", devices->get_num_devices());
return devices->setup(
).then([this] {
return _init();
}).then([this, FNAME] {
return _mkfs(
).safe_then([this] {
return restart_fut();
}).handle_error(
crimson::ct_error::assert_all{}
).then([FNAME] {
SUBINFO(test, "finish");
});
});
}
seastar::future<> tm_teardown() {
LOG_PREFIX(EphemeralTestState::tm_teardown);
SUBINFO(test, "begin");
return teardown().then([this, FNAME] {
devices->reset();
SUBINFO(test, "finish");
});
}
};
class TMTestState : public EphemeralTestState {
protected:
TransactionManagerRef tm;
LBAManager *lba_manager;
Cache* cache;
ExtentPlacementManager *epm;
uint64_t seq = 0;
TMTestState() : EphemeralTestState(1, 0) {}
TMTestState(std::size_t num_main_devices, std::size_t num_cold_devices)
: EphemeralTestState(num_main_devices, num_cold_devices) {}
virtual seastar::future<> _init() override {
auto sec_devices = devices->get_secondary_devices();
auto p_dev = devices->get_primary_device();
tm = make_transaction_manager(p_dev, sec_devices, true);
epm = tm->get_epm();
lba_manager = tm->get_lba_manager();
cache = tm->get_cache();
return seastar::now();
}
virtual seastar::future<> _destroy() override {
epm = nullptr;
lba_manager = nullptr;
cache = nullptr;
tm.reset();
return seastar::now();
}
virtual seastar::future<> _teardown() {
return tm->close().handle_error(
crimson::ct_error::assert_all{"Error in teardown"}
);
}
virtual FuturizedStore::mount_ertr::future<> _mount() {
return tm->mount(
).handle_error(
crimson::ct_error::assert_all{"Error in mount"}
).then([this] {
return epm->stop_background();
}).then([this] {
return epm->run_background_work_until_halt();
});
}
virtual FuturizedStore::mkfs_ertr::future<> _mkfs() {
return tm->mkfs(
).handle_error(
crimson::ct_error::assert_all{"Error in mkfs"}
);
}
auto create_mutate_transaction() {
return tm->create_transaction(
Transaction::src_t::MUTATE, "test_mutate");
}
auto create_read_transaction() {
return tm->create_transaction(
Transaction::src_t::READ, "test_read");
}
auto create_weak_transaction() {
return tm->create_transaction(
Transaction::src_t::READ, "test_read_weak", true);
}
auto submit_transaction_fut2(Transaction& t) {
return tm->submit_transaction(t);
}
auto submit_transaction_fut(Transaction &t) {
return with_trans_intr(
t,
[this](auto &t) {
return tm->submit_transaction(t);
});
}
auto submit_transaction_fut_with_seq(Transaction &t) {
using ertr = TransactionManager::base_iertr;
return with_trans_intr(
t,
[this](auto &t) {
return tm->submit_transaction(t
).si_then([this] {
return ertr::make_ready_future<uint64_t>(seq++);
});
});
}
void submit_transaction(TransactionRef t) {
submit_transaction_fut(*t).unsafe_get0();
epm->run_background_work_until_halt().get0();
}
};
DeviceRef EphemeralSegmentedDevices::get_primary_device_ref() {
return std::move(segment_manager);
}
DeviceRef EphemeralRandomBlockDevices::get_primary_device_ref() {
return std::move(rb_device);
}
void EphemeralSegmentedDevices::set_primary_device_ref(DeviceRef dev) {
segment_manager =
segment_manager::EphemeralSegmentManagerRef(
static_cast<segment_manager::EphemeralSegmentManager*>(dev.release()));
}
void EphemeralRandomBlockDevices::set_primary_device_ref(DeviceRef dev) {
rb_device =
random_block_device::RBMDeviceRef(
static_cast<random_block_device::RBMDevice*>(dev.release()));
}
class SeaStoreTestState : public EphemeralTestState {
class TestMDStoreState {
std::map<std::string, std::string> md;
public:
class Store final : public SeaStore::MDStore {
TestMDStoreState &parent;
public:
Store(TestMDStoreState &parent) : parent(parent) {}
write_meta_ret write_meta(
const std::string& key, const std::string& value) final {
parent.md[key] = value;
return seastar::now();
}
read_meta_ret read_meta(const std::string& key) final {
auto iter = parent.md.find(key);
if (iter != parent.md.end()) {
return read_meta_ret(
read_meta_ertr::ready_future_marker{},
iter->second);
} else {
return read_meta_ret(
read_meta_ertr::ready_future_marker{},
std::nullopt);
}
}
};
Store get_mdstore() {
return Store(*this);
}
} mdstore_state;
protected:
std::unique_ptr<SeaStore> seastore;
FuturizedStore::Shard *sharded_seastore;
SeaStoreTestState() : EphemeralTestState(1, 0) {}
virtual seastar::future<> _init() final {
seastore = make_test_seastore(
std::make_unique<TestMDStoreState::Store>(mdstore_state.get_mdstore()));
return seastore->test_start(devices->get_primary_device_ref()
).then([this] {
sharded_seastore = &(seastore->get_sharded_store());
});
}
virtual seastar::future<> _destroy() final {
devices->set_primary_device_ref(seastore->get_primary_device_ref());
return seastore->stop().then([this] {
seastore.reset();
});
}
virtual seastar::future<> _teardown() final {
return seastore->umount();
}
virtual FuturizedStore::mount_ertr::future<> _mount() final {
return seastore->test_mount();
}
virtual FuturizedStore::mkfs_ertr::future<> _mkfs() final {
return seastore->test_mkfs(uuid_d{});
}
};
| 12,787 | 28.063636 | 84 | h |
null | ceph-main/src/test/crimson/seastore/onode_tree/test_value.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <fmt/format.h>
#include "crimson/common/log.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/value.h"
namespace crimson::os::seastore::onode {
struct test_item_t {
using id_t = uint16_t;
using magic_t = uint32_t;
value_size_t size;
id_t id;
magic_t magic;
value_size_t get_payload_size() const {
assert(size > sizeof(value_header_t));
return static_cast<value_size_t>(size - sizeof(value_header_t));
}
static test_item_t create(std::size_t _size, std::size_t _id) {
ceph_assert(_size <= std::numeric_limits<value_size_t>::max());
ceph_assert(_size > sizeof(value_header_t));
value_size_t size = _size;
ceph_assert(_id <= std::numeric_limits<id_t>::max());
id_t id = _id;
return {size, id, (magic_t)id * 137};
}
};
inline std::ostream& operator<<(std::ostream& os, const test_item_t& item) {
return os << "TestItem(#" << item.id << ", " << item.size << "B)";
}
enum class delta_op_t : uint8_t {
UPDATE_ID,
UPDATE_TAIL_MAGIC,
};
inline std::ostream& operator<<(std::ostream& os, const delta_op_t op) {
switch (op) {
case delta_op_t::UPDATE_ID:
return os << "update_id";
case delta_op_t::UPDATE_TAIL_MAGIC:
return os << "update_tail_magic";
default:
return os << "unknown";
}
}
} // namespace crimson::os::seastore::onode
#if FMT_VERSION >= 90000
template<> struct fmt::formatter<crimson::os::seastore::onode::delta_op_t> : fmt::ostream_formatter {};
#endif
namespace crimson::os::seastore::onode {
template <value_magic_t MAGIC,
string_size_t MAX_NS_SIZE,
string_size_t MAX_OID_SIZE,
value_size_t MAX_VALUE_PAYLOAD_SIZE,
extent_len_t INTERNAL_NODE_SIZE,
extent_len_t LEAF_NODE_SIZE,
bool DO_SPLIT_CHECK>
class TestValue final : public Value {
public:
static constexpr tree_conf_t TREE_CONF = {
MAGIC,
MAX_NS_SIZE,
MAX_OID_SIZE,
MAX_VALUE_PAYLOAD_SIZE,
INTERNAL_NODE_SIZE,
LEAF_NODE_SIZE,
DO_SPLIT_CHECK
};
using id_t = test_item_t::id_t;
using magic_t = test_item_t::magic_t;
struct magic_packed_t {
magic_t value;
} __attribute__((packed));
private:
struct payload_t {
id_t id;
} __attribute__((packed));
struct Replayable {
static void set_id(NodeExtentMutable& payload_mut, id_t id) {
auto p_payload = get_write(payload_mut);
p_payload->id = id;
}
static void set_tail_magic(NodeExtentMutable& payload_mut, magic_t magic) {
auto length = payload_mut.get_length();
auto offset_magic = length - sizeof(magic_t);
payload_mut.copy_in_relative(offset_magic, magic);
}
private:
static payload_t* get_write(NodeExtentMutable& payload_mut) {
return reinterpret_cast<payload_t*>(payload_mut.get_write());
}
};
public:
class Recorder final : public ValueDeltaRecorder {
public:
Recorder(ceph::bufferlist& encoded)
: ValueDeltaRecorder(encoded) {}
~Recorder() override = default;
void encode_set_id(NodeExtentMutable& payload_mut, id_t id) {
auto& encoded = get_encoded(payload_mut);
ceph::encode(delta_op_t::UPDATE_ID, encoded);
ceph::encode(id, encoded);
}
void encode_set_tail_magic(NodeExtentMutable& payload_mut, magic_t magic) {
auto& encoded = get_encoded(payload_mut);
ceph::encode(delta_op_t::UPDATE_TAIL_MAGIC, encoded);
ceph::encode(magic, encoded);
}
protected:
value_magic_t get_header_magic() const override {
return TREE_CONF.value_magic;
}
void apply_value_delta(ceph::bufferlist::const_iterator& delta,
NodeExtentMutable& payload_mut,
laddr_t value_addr) override {
delta_op_t op;
try {
ceph::decode(op, delta);
switch (op) {
case delta_op_t::UPDATE_ID: {
logger().debug("OTree::TestValue::Replay: decoding UPDATE_ID ...");
id_t id;
ceph::decode(id, delta);
logger().debug("OTree::TestValue::Replay: apply id={} ...", id);
Replayable::set_id(payload_mut, id);
break;
}
case delta_op_t::UPDATE_TAIL_MAGIC: {
logger().debug("OTree::TestValue::Replay: decoding UPDATE_TAIL_MAGIC ...");
magic_t magic;
ceph::decode(magic, delta);
logger().debug("OTree::TestValue::Replay: apply magic={} ...", magic);
Replayable::set_tail_magic(payload_mut, magic);
break;
}
default:
logger().error("OTree::TestValue::Replay: got unknown op {} when replay {:#x}+{:#x}",
op, value_addr, payload_mut.get_length());
ceph_abort();
}
} catch (buffer::error& e) {
logger().error("OTree::TestValue::Replay: got decode error {} when replay {:#x}+{:#x}",
e.what(), value_addr, payload_mut.get_length());
ceph_abort();
}
}
private:
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_test);
}
};
TestValue(NodeExtentManager& nm, const ValueBuilder& vb, Ref<tree_cursor_t>& p_cursor)
: Value(nm, vb, p_cursor) {}
~TestValue() override = default;
id_t get_id() const {
return read_payload<payload_t>()->id;
}
void set_id_replayable(Transaction& t, id_t id) {
auto value_mutable = prepare_mutate_payload<payload_t, Recorder>(t);
if (value_mutable.second) {
value_mutable.second->encode_set_id(value_mutable.first, id);
}
Replayable::set_id(value_mutable.first, id);
}
magic_t get_tail_magic() const {
auto p_payload = read_payload<payload_t>();
auto offset_magic = get_payload_size() - sizeof(magic_t);
auto p_magic = reinterpret_cast<const char*>(p_payload) + offset_magic;
return reinterpret_cast<const magic_packed_t*>(p_magic)->value;
}
void set_tail_magic_replayable(Transaction& t, magic_t magic) {
auto value_mutable = prepare_mutate_payload<payload_t, Recorder>(t);
if (value_mutable.second) {
value_mutable.second->encode_set_tail_magic(value_mutable.first, magic);
}
Replayable::set_tail_magic(value_mutable.first, magic);
}
/*
* tree_util.h related interfaces
*/
using item_t = test_item_t;
void initialize(Transaction& t, const item_t& item) {
ceph_assert(get_payload_size() + sizeof(value_header_t) == item.size);
set_id_replayable(t, item.id);
set_tail_magic_replayable(t, item.magic);
}
void validate(const item_t& item) const {
ceph_assert(get_payload_size() + sizeof(value_header_t) == item.size);
ceph_assert(get_id() == item.id);
ceph_assert(get_tail_magic() == item.magic);
}
};
using UnboundedValue = TestValue<
value_magic_t::TEST_UNBOUND, 4096, 4096, 4096, 4096, 4096, false>;
using BoundedValue = TestValue<
value_magic_t::TEST_BOUNDED, 320, 320, 640, 4096, 4096, true>;
// should be the same configuration as FLTreeOnode
using ExtendedValue = TestValue<
value_magic_t::TEST_EXTENDED, 256, 2048, 1200, 8192, 16384, true>;
}
#if FMT_VERSION >= 90000
template<>
struct fmt::formatter<crimson::os::seastore::onode::test_item_t> : fmt::ostream_formatter {};
#endif
| 7,324 | 29.394191 | 103 | h |
null | ceph-main/src/test/direct_messenger/DirectMessenger.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MSG_DIRECTMESSENGER_H
#define CEPH_MSG_DIRECTMESSENGER_H
#include "msg/SimplePolicyMessenger.h"
#include "common/Semaphore.h"
class DispatchStrategy;
/**
* DirectMessenger provides a direct path between two messengers
* within a process. A pair of DirectMessengers share their
* DispatchStrategy with each other, and calls to send_message()
* forward the message directly to the other.
*
 * This is for testing and i/o injection only; it cannot be
 * instantiated as a normal messenger via the ms_type option.
*/
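/*
 * Usage sketch (not part of the original header; the entity names, nonce
 * values, and the choice of FastStrategy from FastStrategy.h as the
 * DispatchStrategy are illustrative assumptions). A pair of messengers is
 * attached before start():
 *
 *   DirectMessenger a(cct, entity_name_t::CLIENT(1), "a", 0, new FastStrategy());
 *   DirectMessenger b(cct, entity_name_t::CLIENT(2), "b", 0, new FastStrategy());
 *   a.set_direct_peer(&b);
 *   b.set_direct_peer(&a);
 *   a.start();
 *   b.start();
 *   // a.send_message(msg, b_inst) now hands msg directly to b's
 *   // dispatchers (b_inst: b's entity_inst_t, placeholder name),
 *   // with no network I/O involved.
 */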
class DirectMessenger : public SimplePolicyMessenger {
private:
/// strategy for local dispatch
std::unique_ptr<DispatchStrategy> dispatchers;
/// peer instance for comparison in get_connection()
entity_inst_t peer_inst;
/// connection that sends to the peer's dispatchers
ConnectionRef peer_connection;
/// connection that sends to my own dispatchers
ConnectionRef loopback_connection;
/// semaphore for signalling wait() from shutdown()
Semaphore sem;
public:
DirectMessenger(CephContext *cct, entity_name_t name,
                  std::string mname, uint64_t nonce,
DispatchStrategy *dispatchers);
~DirectMessenger();
/// attach to a peer messenger. must be called before start()
int set_direct_peer(DirectMessenger *peer);
// Messenger interface
/// sets the addr. must not be called after set_direct_peer() or start()
int bind(const entity_addr_t& bind_addr) override;
/// sets the addr. must not be called after set_direct_peer() or start()
int client_bind(const entity_addr_t& bind_addr) override;
/// starts dispatchers
int start() override;
/// breaks connections, stops dispatchers, and unblocks callers of wait()
int shutdown() override;
/// blocks until shutdown() completes
void wait() override;
/// returns a connection to the peer instance, a loopback connection to our
/// own instance, or null if not connected
ConnectionRef get_connection(const entity_inst_t& dst) override;
/// returns a loopback connection that dispatches to this messenger
ConnectionRef get_loopback_connection() override;
/// dispatches a message to the peer instance if connected
int send_message(Message *m, const entity_inst_t& dst) override;
/// mark down the connection for the given address
void mark_down(const entity_addr_t& a) override;
/// mark down all connections
void mark_down_all() override;
// unimplemented Messenger interface
void set_addr_unknowns(const entity_addr_t &addr) override {}
void set_addr(const entity_addr_t &addr) override {}
int get_dispatch_queue_len() override { return 0; }
double get_dispatch_queue_max_age(utime_t now) override { return 0; }
void set_cluster_protocol(int p) override {}
};
#endif
| 3,186 | 31.191919 | 77 | h |
null | ceph-main/src/test/direct_messenger/DispatchStrategy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 CohortFS, LLC
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef DISPATCH_STRATEGY_H
#define DISPATCH_STRATEGY_H
#include "msg/Message.h"
class Messenger;
class DispatchStrategy
{
protected:
Messenger *msgr = nullptr;
public:
DispatchStrategy() {}
Messenger *get_messenger() { return msgr; }
void set_messenger(Messenger *_msgr) { msgr = _msgr; }
virtual void ds_dispatch(Message *m) = 0;
virtual void shutdown() = 0;
virtual void start() = 0;
virtual void wait() = 0;
virtual ~DispatchStrategy() {}
};
#endif /* DISPATCH_STRATEGY_H */
| 906 | 22.868421 | 70 | h |
null | ceph-main/src/test/direct_messenger/FastStrategy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 CohortFS, LLC
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef FAST_STRATEGY_H
#define FAST_STRATEGY_H
#include "DispatchStrategy.h"
class FastStrategy : public DispatchStrategy {
public:
FastStrategy() {}
void ds_dispatch(Message *m) override {
msgr->ms_fast_preprocess(m);
if (msgr->ms_can_fast_dispatch(m))
msgr->ms_fast_dispatch(m);
else
msgr->ms_deliver_dispatch(m);
}
void shutdown() override {}
void start() override {}
void wait() override {}
virtual ~FastStrategy() {}
};
#endif /* FAST_STRATEGY_H */
| 900 | 24.027778 | 70 | h |
null | ceph-main/src/test/direct_messenger/QueueStrategy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 CohortFS, LLC
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef QUEUE_STRATEGY_H
#define QUEUE_STRATEGY_H
#include <vector>
#include <memory>
#include <boost/intrusive/list.hpp>
#include "DispatchStrategy.h"
#include "msg/Messenger.h"
namespace bi = boost::intrusive;
class QueueStrategy : public DispatchStrategy {
ceph::mutex lock = ceph::make_mutex("QueueStrategy::lock");
const int n_threads;
bool stop;
Message::Queue mqueue;
class QSThread : public Thread {
public:
bi::list_member_hook<> thread_q;
QueueStrategy *dq;
ceph::condition_variable cond;
explicit QSThread(QueueStrategy *dq) : thread_q(), dq(dq) {}
void* entry() {
dq->entry(this);
return NULL;
}
typedef bi::list< QSThread,
bi::member_hook< QSThread,
bi::list_member_hook<>,
&QSThread::thread_q > > Queue;
};
std::vector<std::unique_ptr<QSThread>> threads; //< all threads
QSThread::Queue disp_threads; //< waiting threads
public:
explicit QueueStrategy(int n_threads);
void ds_dispatch(Message *m) override;
void shutdown() override;
void start() override;
void wait() override;
void entry(QSThread *thrd);
virtual ~QueueStrategy() {}
};
#endif /* QUEUE_STRATEGY_H */
| 1,597 | 23.96875 | 70 | h |
null | ceph-main/src/test/erasure-code/ErasureCodeExample.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013 Cloudwatt <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_EXAMPLE_H
#define CEPH_ERASURE_CODE_EXAMPLE_H
#include <unistd.h>
#include <errno.h>
#include <algorithm>
#include <sstream>
#include "crush/CrushWrapper.h"
#include "osd/osd_types.h"
#include "erasure-code/ErasureCode.h"
#define FIRST_DATA_CHUNK 0
#define SECOND_DATA_CHUNK 1
#define DATA_CHUNKS 2u
#define CODING_CHUNK 2
#define CODING_CHUNKS 1u
#define MINIMUM_TO_RECOVER 2u
class ErasureCodeExample final : public ErasureCode {
public:
~ErasureCodeExample() override {}
int create_rule(const std::string &name,
CrushWrapper &crush,
std::ostream *ss) const override {
return crush.add_simple_rule(name, "default", "host", "",
"indep", pg_pool_t::TYPE_ERASURE, ss);
}
int minimum_to_decode_with_cost(const std::set<int> &want_to_read,
const std::map<int, int> &available,
std::set<int> *minimum) override {
//
// If one chunk is more expensive to fetch than the others,
// recover it instead. For instance, if the cost reflects the
// time it takes for a chunk to be retrieved from a remote
// OSD and if CPU is cheap, it could make sense to recover
// instead of fetching the chunk.
//
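    // Worked example (not in the original source; the concrete costs are
    // assumed for illustration). With want_to_read = {0, 1} and
    // available = { 0: cost 1, 1: cost 1, 2: cost 10 }, the coding chunk
    // (2) is the most expensive, so it is dropped and *minimum becomes
    // {0, 1}: both data chunks are fetched and nothing is recovered.
    // With costs { 0: 10, 1: 1, 2: 1 } instead, chunk 0 is dropped,
    // *minimum becomes {1, 2}, and decode rebuilds chunk 0 as 1 XOR 2.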
std::map<int, int> c2c(available);
if (c2c.size() > DATA_CHUNKS) {
if (c2c[FIRST_DATA_CHUNK] > c2c[SECOND_DATA_CHUNK] &&
c2c[FIRST_DATA_CHUNK] > c2c[CODING_CHUNK])
c2c.erase(FIRST_DATA_CHUNK);
else if(c2c[SECOND_DATA_CHUNK] > c2c[FIRST_DATA_CHUNK] &&
c2c[SECOND_DATA_CHUNK] > c2c[CODING_CHUNK])
c2c.erase(SECOND_DATA_CHUNK);
else if(c2c[CODING_CHUNK] > c2c[FIRST_DATA_CHUNK] &&
c2c[CODING_CHUNK] > c2c[SECOND_DATA_CHUNK])
c2c.erase(CODING_CHUNK);
}
std::set <int> available_chunks;
for (std::map<int, int>::const_iterator i = c2c.begin();
i != c2c.end();
++i)
available_chunks.insert(i->first);
return _minimum_to_decode(want_to_read, available_chunks, minimum);
}
unsigned int get_chunk_count() const override {
return DATA_CHUNKS + CODING_CHUNKS;
}
unsigned int get_data_chunk_count() const override {
return DATA_CHUNKS;
}
unsigned int get_chunk_size(unsigned int object_size) const override {
return ( object_size / DATA_CHUNKS ) + 1;
}
int encode(const std::set<int> &want_to_encode,
const bufferlist &in,
std::map<int, bufferlist> *encoded) override {
//
// make sure all data chunks have the same length, allocating
// padding if necessary.
//
unsigned int chunk_length = get_chunk_size(in.length());
bufferlist out(in);
unsigned int width = get_chunk_count() * get_chunk_size(in.length());
bufferptr pad(width - in.length());
pad.zero(0, get_data_chunk_count());
out.push_back(pad);
//
// compute the coding chunk with first chunk ^ second chunk
//
char *p = out.c_str();
for (unsigned i = 0; i < chunk_length; i++)
p[i + CODING_CHUNK * chunk_length] =
p[i + FIRST_DATA_CHUNK * chunk_length] ^
p[i + SECOND_DATA_CHUNK * chunk_length];
//
// populate the bufferlist with bufferptr pointing
// to chunk boundaries
//
const bufferptr &ptr = out.front();
for (auto j = want_to_encode.begin();
j != want_to_encode.end();
++j) {
bufferlist tmp;
bufferptr chunk(ptr, (*j) * chunk_length, chunk_length);
tmp.push_back(chunk);
tmp.claim_append((*encoded)[*j]);
(*encoded)[*j].swap(tmp);
}
return 0;
}
int encode_chunks(const std::set<int> &want_to_encode,
std::map<int, bufferlist> *encoded) override {
ceph_abort();
return 0;
}
int _decode(const std::set<int> &want_to_read,
const std::map<int, bufferlist> &chunks,
std::map<int, bufferlist> *decoded) override {
//
// All chunks have the same size
//
unsigned chunk_length = (*chunks.begin()).second.length();
for (std::set<int>::iterator i = want_to_read.begin();
i != want_to_read.end();
++i) {
if (chunks.find(*i) != chunks.end()) {
//
// If the chunk is available, just copy the bufferptr pointer
// to the decoded argument.
//
(*decoded)[*i] = chunks.find(*i)->second;
} else if(chunks.size() != 2) {
//
// If a chunk is missing and there are not enough chunks
// to recover, abort.
//
return -ERANGE;
} else {
//
// No matter what the missing chunk is, XOR of the other
// two recovers it.
//
std::map<int, bufferlist>::const_iterator k = chunks.begin();
const char *a = k->second.front().c_str();
++k;
const char *b = k->second.front().c_str();
bufferptr chunk(chunk_length);
char *c = chunk.c_str();
for (unsigned j = 0; j < chunk_length; j++) {
c[j] = a[j] ^ b[j];
}
bufferlist tmp;
tmp.append(chunk);
tmp.claim_append((*decoded)[*i]);
(*decoded)[*i].swap(tmp);
}
}
return 0;
}
int decode_chunks(const std::set<int> &want_to_read,
const std::map<int, bufferlist> &chunks,
std::map<int, bufferlist> *decoded) override {
ceph_abort();
return 0;
}
const std::vector<int> &get_chunk_mapping() const override {
static std::vector<int> mapping;
return mapping;
}
};
#endif
| 5,876 | 28.984694 | 78 | h |
null | ceph-main/src/test/erasure-code/ceph_erasure_code_benchmark.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_BENCHMARK_H
#define CEPH_ERASURE_CODE_BENCHMARK_H
#include <string>
#include <map>
#include <vector>
#include <boost/intrusive_ptr.hpp>
#include "include/buffer.h"
#include "common/ceph_context.h"
#include "erasure-code/ErasureCodeInterface.h"
class ErasureCodeBench {
int in_size;
int max_iterations;
int erasures;
int k;
int m;
std::string plugin;
bool exhaustive_erasures;
std::vector<int> erased;
std::string workload;
ceph::ErasureCodeProfile profile;
bool verbose;
boost::intrusive_ptr<CephContext> cct;
public:
int setup(int argc, char** argv);
int run();
int decode_erasures(const std::map<int, ceph::buffer::list> &all_chunks,
const std::map<int, ceph::buffer::list> &chunks,
unsigned i,
unsigned want_erasures,
ErasureCodeInterfaceRef erasure_code);
int decode();
int encode();
};
#endif
| 1,482 | 22.539683 | 74 | h |
null | ceph-main/src/test/fio/ring_buffer.h | /*
 * Very simple and fast lockless ring buffer implementation for
* one producer and one consumer.
*/
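/*
 * Usage sketch (not part of the original header; the capacity, some_ptr
 * and consume() are illustrative assumptions). One thread enqueues while
 * another dequeues:
 *
 *   struct ring_buffer rbuf;
 *   ring_buffer_init(&rbuf, 128);          // rounded up to a power of two
 *
 *   // producer thread
 *   ring_buffer_enqueue(&rbuf, some_ptr);
 *
 *   // consumer thread
 *   while (ring_buffer_used_size(&rbuf))
 *           consume(ring_buffer_dequeue(&rbuf));
 *
 *   ring_buffer_deinit(&rbuf);
 */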
#include <stdint.h>
#include <stddef.h>
/* Do not overcomplicate, choose generic x86 case */
#define L1_CACHE_BYTES 64
#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
struct ring_buffer
{
unsigned int read_idx __cacheline_aligned;
unsigned int write_idx __cacheline_aligned;
unsigned int size;
unsigned int low_mask;
unsigned int high_mask;
unsigned int bit_shift;
void *data_ptr;
};
static inline unsigned int upper_power_of_two(unsigned int v)
{
v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v++;
return v;
}
static inline int ring_buffer_init(struct ring_buffer* rbuf, unsigned int size)
{
/* Must be pow2 */
if (((size-1) & size))
size = upper_power_of_two(size);
size *= sizeof(void *);
rbuf->data_ptr = malloc(size);
rbuf->size = size;
rbuf->read_idx = 0;
rbuf->write_idx = 0;
rbuf->bit_shift = __builtin_ffs(sizeof(void *))-1;
rbuf->low_mask = rbuf->size - 1;
rbuf->high_mask = rbuf->size * 2 - 1;
return 0;
}
static inline void ring_buffer_deinit(struct ring_buffer* rbuf)
{
free(rbuf->data_ptr);
}
static inline unsigned int ring_buffer_used_size(const struct ring_buffer* rbuf)
{
__sync_synchronize();
return ((rbuf->write_idx - rbuf->read_idx) & rbuf->high_mask) >>
rbuf->bit_shift;
}
static inline void ring_buffer_enqueue(struct ring_buffer* rbuf, void *ptr)
{
unsigned int idx;
/*
	 * Be aware: we do not check whether the buffer is full; we assume
	 * the user of the ring buffer never submits more than it can hold.
*/
idx = rbuf->write_idx & rbuf->low_mask;
*(void **)((uintptr_t)rbuf->data_ptr + idx) = ptr;
/* Barrier to be sure stored pointer will be seen properly */
__sync_synchronize();
rbuf->write_idx = (rbuf->write_idx + sizeof(ptr)) & rbuf->high_mask;
}
static inline void *ring_buffer_dequeue(struct ring_buffer* rbuf)
{
unsigned idx;
void *ptr;
/*
	 * Be aware: we do not check whether the buffer is empty; we assume
	 * the user of the ring buffer has called ring_buffer_used_size(),
	 * which returns the actual used size and explicitly introduces a
	 * memory barrier.
*/
idx = rbuf->read_idx & rbuf->low_mask;
ptr = *(void **)((uintptr_t)rbuf->data_ptr + idx);
rbuf->read_idx = (rbuf->read_idx + sizeof(ptr)) & rbuf->high_mask;
return ptr;
}
| 2,420 | 22.504854 | 80 | h |
null | ceph-main/src/test/immutable_object_cache/MockCacheDaemon.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef IMMUTABLE_OBJECT_CACHE_MOCK_DAEMON
#define IMMUTABLE_OBJECT_CACHE_MOCK_DAEMON
#include <iostream>
#include <unistd.h>
#include "gmock/gmock.h"
#include "include/Context.h"
#include "tools/immutable_object_cache/CacheClient.h"
namespace ceph {
namespace immutable_obj_cache {
class MockCacheClient {
public:
MockCacheClient(const std::string& file, CephContext* ceph_ctx) {}
MOCK_METHOD0(run, void());
MOCK_METHOD0(is_session_work, bool());
MOCK_METHOD0(close, void());
MOCK_METHOD0(stop, void());
MOCK_METHOD0(connect, int());
MOCK_METHOD1(connect, void(Context*));
MOCK_METHOD6(lookup_object, void(std::string, uint64_t, uint64_t, uint64_t,
std::string, CacheGenContextURef));
MOCK_METHOD1(register_client, int(Context*));
};
class MockCacheServer {
public:
MockCacheServer(CephContext* cct, const std::string& file,
ProcessMsg processmsg) {
}
MOCK_METHOD0(run, int());
MOCK_METHOD0(start_accept, int());
MOCK_METHOD0(stop, int());
};
} // namespace immutable_obj_cache
} // namespace ceph
#endif // IMMUTABLE_OBJECT_CACHE_MOCK_DAEMON
| 1,237 | 25.913043 | 77 | h |
null | ceph-main/src/test/journal/RadosTestFixture.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/librados/test.h"
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include "journal/JournalMetadata.h"
#include "cls/journal/cls_journal_types.h"
#include "gtest/gtest.h"
class ThreadPool;
class RadosTestFixture : public ::testing::Test {
public:
static void SetUpTestCase();
static void TearDownTestCase();
static std::string get_temp_oid();
RadosTestFixture();
void SetUp() override;
void TearDown() override;
int create(const std::string &oid, uint8_t order = 14,
uint8_t splay_width = 2);
ceph::ref_t<journal::JournalMetadata> create_metadata(const std::string &oid,
const std::string &client_id = "client",
double commit_internal = 0.1,
int max_concurrent_object_sets = 0);
int append(const std::string &oid, const bufferlist &bl);
int client_register(const std::string &oid, const std::string &id = "client",
const std::string &description = "");
int client_commit(const std::string &oid, const std::string &id,
const cls::journal::ObjectSetPosition &commit_position);
bufferlist create_payload(const std::string &payload);
struct Listener : public journal::JournalMetadataListener {
RadosTestFixture *test_fixture;
ceph::mutex mutex = ceph::make_mutex("mutex");
ceph::condition_variable cond;
std::map<journal::JournalMetadata*, uint32_t> updates;
Listener(RadosTestFixture *_test_fixture)
: test_fixture(_test_fixture) {}
void handle_update(journal::JournalMetadata *metadata) override {
std::lock_guard locker{mutex};
++updates[metadata];
cond.notify_all();
}
};
int init_metadata(const ceph::ref_t<journal::JournalMetadata>& metadata);
bool wait_for_update(const ceph::ref_t<journal::JournalMetadata>& metadata);
static std::string _pool_name;
static librados::Rados _rados;
static uint64_t _oid_number;
static ThreadPool *_thread_pool;
librados::IoCtx m_ioctx;
ContextWQ *m_work_queue = nullptr;
ceph::mutex m_timer_lock;
SafeTimer *m_timer = nullptr;
Listener m_listener;
std::list<ceph::ref_t<journal::JournalMetadata>> m_metadatas;
};
| 2,386 | 30.826667 | 86 | h |
null | ceph-main/src/test/journal/mock/MockJournaler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef TEST_RBD_MIRROR_MOCK_JOURNALER_H
#define TEST_RBD_MIRROR_MOCK_JOURNALER_H
#include <gmock/gmock.h>
#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include "cls/journal/cls_journal_types.h"
#include "journal/Journaler.h"
#include <iosfwd>
#include <string>
class Context;
namespace journal {
struct ReplayHandler;
struct Settings;
struct MockFuture {
static MockFuture *s_instance;
static MockFuture &get_instance() {
ceph_assert(s_instance != nullptr);
return *s_instance;
}
MockFuture() {
s_instance = this;
}
MOCK_CONST_METHOD0(is_valid, bool());
MOCK_METHOD1(flush, void(Context *));
MOCK_METHOD1(wait, void(Context *));
};
struct MockFutureProxy {
bool is_valid() const {
return MockFuture::get_instance().is_valid();
}
void flush(Context *on_safe) {
MockFuture::get_instance().flush(on_safe);
}
void wait(Context *on_safe) {
MockFuture::get_instance().wait(on_safe);
}
};
struct MockReplayEntry {
static MockReplayEntry *s_instance;
static MockReplayEntry &get_instance() {
ceph_assert(s_instance != nullptr);
return *s_instance;
}
MockReplayEntry() {
s_instance = this;
}
MOCK_CONST_METHOD0(get_commit_tid, uint64_t());
MOCK_CONST_METHOD0(get_data, bufferlist());
};
struct MockReplayEntryProxy {
uint64_t get_commit_tid() const {
return MockReplayEntry::get_instance().get_commit_tid();
}
bufferlist get_data() const {
return MockReplayEntry::get_instance().get_data();
}
};
struct MockJournaler {
static MockJournaler *s_instance;
static MockJournaler &get_instance() {
ceph_assert(s_instance != nullptr);
return *s_instance;
}
MockJournaler() {
s_instance = this;
}
MOCK_METHOD0(construct, void());
MOCK_METHOD1(init, void(Context *));
MOCK_METHOD0(shut_down, void());
MOCK_METHOD1(shut_down, void(Context *));
MOCK_CONST_METHOD0(is_initialized, bool());
MOCK_METHOD3(get_metadata, void(uint8_t *order, uint8_t *splay_width,
int64_t *pool_id));
MOCK_METHOD4(get_mutable_metadata, void(uint64_t*, uint64_t*,
std::set<cls::journal::Client> *,
Context*));
MOCK_METHOD2(register_client, void(const bufferlist &, Context *));
MOCK_METHOD1(unregister_client, void(Context *));
MOCK_METHOD3(get_client, void(const std::string &, cls::journal::Client *,
Context *));
MOCK_METHOD2(get_cached_client, int(const std::string&, cls::journal::Client*));
MOCK_METHOD2(update_client, void(const bufferlist &, Context *));
MOCK_METHOD4(allocate_tag, void(uint64_t, const bufferlist &,
cls::journal::Tag*, Context *));
MOCK_METHOD3(get_tag, void(uint64_t, cls::journal::Tag *, Context *));
MOCK_METHOD3(get_tags, void(uint64_t, journal::Journaler::Tags*, Context*));
MOCK_METHOD4(get_tags, void(uint64_t, uint64_t, journal::Journaler::Tags*,
Context*));
MOCK_METHOD1(start_replay, void(::journal::ReplayHandler *replay_handler));
MOCK_METHOD2(start_live_replay, void(ReplayHandler *, double));
MOCK_METHOD1(try_pop_front, bool(MockReplayEntryProxy *));
MOCK_METHOD2(try_pop_front, bool(MockReplayEntryProxy *, uint64_t *));
MOCK_METHOD0(stop_replay, void());
MOCK_METHOD1(stop_replay, void(Context *on_finish));
MOCK_METHOD1(start_append, void(uint64_t));
MOCK_METHOD3(set_append_batch_options, void(int, uint64_t, double));
MOCK_CONST_METHOD0(get_max_append_size, uint64_t());
MOCK_METHOD2(append, MockFutureProxy(uint64_t tag_id,
const bufferlist &bl));
MOCK_METHOD1(flush, void(Context *on_safe));
MOCK_METHOD1(stop_append, void(Context *on_safe));
MOCK_METHOD1(committed, void(const MockReplayEntryProxy &));
MOCK_METHOD1(committed, void(const MockFutureProxy &future));
MOCK_METHOD1(flush_commit_position, void(Context*));
MOCK_METHOD1(add_listener, void(JournalMetadataListener *));
MOCK_METHOD1(remove_listener, void(JournalMetadataListener *));
};
struct MockJournalerProxy {
MockJournalerProxy() {
MockJournaler::get_instance().construct();
}
template <typename IoCtxT>
MockJournalerProxy(IoCtxT &header_ioctx, const std::string &,
const std::string &, const Settings&,
journal::CacheManagerHandler *) {
MockJournaler::get_instance().construct();
}
template <typename WorkQueue, typename Timer>
MockJournalerProxy(WorkQueue *work_queue, Timer *timer, ceph::mutex *timer_lock,
librados::IoCtx &header_ioctx,
const std::string &journal_id,
const std::string &client_id, const Settings&,
journal::CacheManagerHandler *) {
MockJournaler::get_instance().construct();
}
void exists(Context *on_finish) const {
on_finish->complete(-EINVAL);
}
void create(uint8_t order, uint8_t splay_width, int64_t pool_id, Context *on_finish) {
on_finish->complete(-EINVAL);
}
void remove(bool force, Context *on_finish) {
on_finish->complete(-EINVAL);
}
int register_client(const bufferlist &data) {
return -EINVAL;
}
void allocate_tag(uint64_t tag_class, const bufferlist &tag_data,
cls::journal::Tag* tag, Context *on_finish) {
MockJournaler::get_instance().allocate_tag(tag_class, tag_data, tag,
on_finish);
}
void init(Context *on_finish) {
MockJournaler::get_instance().init(on_finish);
}
void shut_down() {
MockJournaler::get_instance().shut_down();
}
void shut_down(Context *on_finish) {
MockJournaler::get_instance().shut_down(on_finish);
}
bool is_initialized() const {
return MockJournaler::get_instance().is_initialized();
}
void get_metadata(uint8_t *order, uint8_t *splay_width, int64_t *pool_id) {
MockJournaler::get_instance().get_metadata(order, splay_width, pool_id);
}
void get_mutable_metadata(uint64_t *min, uint64_t *active,
std::set<cls::journal::Client> *clients,
Context *on_finish) {
MockJournaler::get_instance().get_mutable_metadata(min, active, clients,
on_finish);
}
void register_client(const bufferlist &data, Context *on_finish) {
MockJournaler::get_instance().register_client(data, on_finish);
}
void unregister_client(Context *on_finish) {
MockJournaler::get_instance().unregister_client(on_finish);
}
void get_client(const std::string &client_id, cls::journal::Client *client,
Context *on_finish) {
MockJournaler::get_instance().get_client(client_id, client, on_finish);
}
int get_cached_client(const std::string& client_id,
cls::journal::Client* client) {
return MockJournaler::get_instance().get_cached_client(client_id, client);
}
void update_client(const bufferlist &client_data, Context *on_finish) {
MockJournaler::get_instance().update_client(client_data, on_finish);
}
void get_tag(uint64_t tag_tid, cls::journal::Tag *tag, Context *on_finish) {
MockJournaler::get_instance().get_tag(tag_tid, tag, on_finish);
}
void get_tags(uint64_t tag_class, journal::Journaler::Tags *tags,
Context *on_finish) {
MockJournaler::get_instance().get_tags(tag_class, tags, on_finish);
}
void get_tags(uint64_t start_after_tag_tid, uint64_t tag_class,
journal::Journaler::Tags *tags, Context *on_finish) {
MockJournaler::get_instance().get_tags(start_after_tag_tid, tag_class, tags,
on_finish);
}
void start_replay(::journal::ReplayHandler *replay_handler) {
MockJournaler::get_instance().start_replay(replay_handler);
}
void start_live_replay(ReplayHandler *handler, double interval) {
MockJournaler::get_instance().start_live_replay(handler, interval);
}
bool try_pop_front(MockReplayEntryProxy *replay_entry) {
return MockJournaler::get_instance().try_pop_front(replay_entry);
}
bool try_pop_front(MockReplayEntryProxy *entry, uint64_t *tag_tid) {
return MockJournaler::get_instance().try_pop_front(entry, tag_tid);
}
void stop_replay() {
MockJournaler::get_instance().stop_replay();
}
void stop_replay(Context *on_finish) {
MockJournaler::get_instance().stop_replay(on_finish);
}
void start_append(uint64_t max_in_flight_appends) {
MockJournaler::get_instance().start_append(max_in_flight_appends);
}
void set_append_batch_options(int flush_interval, uint64_t flush_bytes,
double flush_age) {
MockJournaler::get_instance().set_append_batch_options(
flush_interval, flush_bytes, flush_age);
}
uint64_t get_max_append_size() const {
return MockJournaler::get_instance().get_max_append_size();
}
MockFutureProxy append(uint64_t tag_id, const bufferlist &bl) {
return MockJournaler::get_instance().append(tag_id, bl);
}
void flush(Context *on_safe) {
MockJournaler::get_instance().flush(on_safe);
}
void stop_append(Context *on_safe) {
MockJournaler::get_instance().stop_append(on_safe);
}
void committed(const MockReplayEntryProxy &entry) {
MockJournaler::get_instance().committed(entry);
}
void committed(const MockFutureProxy &future) {
MockJournaler::get_instance().committed(future);
}
void flush_commit_position(Context *on_finish) {
MockJournaler::get_instance().flush_commit_position(on_finish);
}
void add_listener(JournalMetadataListener *listener) {
MockJournaler::get_instance().add_listener(listener);
}
void remove_listener(JournalMetadataListener *listener) {
MockJournaler::get_instance().remove_listener(listener);
}
};
std::ostream &operator<<(std::ostream &os, const MockJournalerProxy &);
} // namespace journal
#endif // TEST_RBD_MIRROR_MOCK_JOURNALER_H
| 10,179 | 31.420382 | 88 | h |
null | ceph-main/src/test/lazy-omap-stats/lazy_omap_stats_test.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LAZY_OMAP_STATS_TEST_H
#define CEPH_LAZY_OMAP_STATS_TEST_H
#include <map>
#include <regex>
#include <string>
#include "include/compat.h"
#include "include/rados/librados.hpp"
struct index_t {
unsigned byte_index = 0;
unsigned key_index = 0;
};
class LazyOmapStatsTest
{
librados::IoCtx io_ctx;
librados::Rados rados;
std::map<std::string, librados::bufferlist> payload;
struct lazy_omap_test_t {
unsigned payload_size = 0;
unsigned replica_count = 3;
unsigned keys = 2000;
unsigned how_many = 50;
std::string pool_name = "lazy_omap_test_pool";
std::string pool_id;
unsigned total_bytes = 0;
unsigned total_keys = 0;
} conf;
typedef enum {
TARGET_MON,
TARGET_MGR
} CommandTarget;
LazyOmapStatsTest(LazyOmapStatsTest&) = delete;
void operator=(LazyOmapStatsTest) = delete;
void init(const int argc, const char** argv);
void shutdown();
void write_omap(const std::string& object_name);
const std::string get_name() const;
void create_payload();
void write_many(const unsigned how_many);
void scrub();
const int find_matches(std::string& output, std::regex& reg) const;
void check_one();
const int find_index(std::string& haystack, std::regex& needle,
std::string label) const;
const unsigned tally_column(const unsigned omap_bytes_index,
const std::string& table, bool header) const;
void check_column(const int index, const std::string& table,
const std::string& type, bool header = true) const;
index_t get_indexes(std::regex& reg, std::string& output) const;
void check_pg_dump();
void check_pg_dump_summary();
void check_pg_dump_pgs();
void check_pg_dump_pools();
void check_pg_ls();
const std::string get_output(
const std::string command = R"({"prefix": "pg dump"})",
const bool silent = false,
const CommandTarget target = CommandTarget::TARGET_MGR);
void get_pool_id(const std::string& pool);
std::map<std::string, std::string> get_scrub_stamps();
void wait_for_active_clean();
public:
LazyOmapStatsTest() = default;
const int run(const int argc, const char** argv);
};
#endif // CEPH_LAZY_OMAP_STATS_TEST_H
| 2,632 | 28.58427 | 71 | h |
null | ceph-main/src/test/libcephfs/ceph_pthread_self.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBCEPHFS_PTHREAD_SELF
#define CEPH_TEST_LIBCEPHFS_PTHREAD_SELF
#include <pthread.h>
#include <type_traits>
/*
* There is a difference between libc shipped with FreeBSD and
* glibc shipped with GNU/Linux for the return type of pthread_self().
*
 * libc returns an opaque pthread_t that is not implicitly convertible
 * to the uint64_t that gtest expects, so tests using gtest would not
 * compile because of this difference.
 *
 * A conversion function was therefore introduced in include/compat.h:
 * (uint64_t)ceph_pthread_self()
*
*/
static uint64_t ceph_pthread_self() {
auto me = pthread_self();
static_assert(std::is_convertible_v<decltype(me), uint64_t> ||
std::is_pointer_v<decltype(me)>,
"we need to use pthread_self() for the owner parameter");
return static_cast<uint64_t>(me);
}
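// Usage sketch (illustrative; expected_owner is an assumed variable):
//
//   ASSERT_EQ(expected_owner, ceph_pthread_self());
//
// i.e. gtest can compare the value directly without casting
// pthread_self() at every call site.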
#endif
| 958 | 28.96875 | 73 | h |
null | ceph-main/src/test/librados/TestCase.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_RADOS_TESTCASE_H
#define CEPH_TEST_RADOS_TESTCASE_H
#include "include/rados/librados.h"
#include "gtest/gtest.h"
#include <string>
/**
* These test cases create a temporary pool that lives as long as the
 * test case. We initially use the default namespace and assume the
 * test will use whatever namespaces it wants. After each test all objects
* are removed.
*
* Since pool creation and deletion is slow, this allows many tests to
* run faster.
*/
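/*
 * Usage sketch (not part of the original header; the alias, test name and
 * object contents are illustrative assumptions). Tests derive from the
 * fixture and use the inherited cluster/ioctx members directly:
 *
 *   typedef RadosTestNS LibRadosIoNS;
 *   TEST_F(LibRadosIoNS, SimpleWrite) {
 *     char buf[128];
 *     memset(buf, 0xcc, sizeof(buf));
 *     ASSERT_EQ(0, rados_write(ioctx, "foo", buf, sizeof(buf), 0));
 *   }
 */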
class RadosTestNS : public ::testing::Test {
public:
RadosTestNS(bool c=false) : cleanup(c) {}
~RadosTestNS() override {}
protected:
static void SetUpTestCase();
static void TearDownTestCase();
static void cleanup_all_objects(rados_ioctx_t ioctx);
static rados_t s_cluster;
static std::string pool_name;
void SetUp() override;
void TearDown() override;
rados_t cluster = nullptr;
rados_ioctx_t ioctx = nullptr;
bool cleanup;
};
struct RadosTestNSCleanup : public RadosTestNS {
RadosTestNSCleanup() : RadosTestNS(true) {}
};
class RadosTestECNS : public RadosTestNS {
public:
RadosTestECNS(bool c=false) : cleanup(c) {}
~RadosTestECNS() override {}
protected:
static void SetUpTestCase();
static void TearDownTestCase();
static rados_t s_cluster;
static std::string pool_name;
void SetUp() override;
void TearDown() override;
rados_t cluster = nullptr;
rados_ioctx_t ioctx = nullptr;
uint64_t alignment = 0;
bool cleanup;
};
struct RadosTestECNSCleanup : public RadosTestECNS {
RadosTestECNSCleanup() : RadosTestECNS(true) {}
};
/**
* These test cases create a temporary pool that lives as long as the
* test case. Each test within a test case gets a new ioctx set to a
* unique namespace within the pool.
*
* Since pool creation and deletion is slow, this allows many tests to
* run faster.
*/
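/*
 * Usage sketch (not part of the original header; names are illustrative
 * assumptions). Because SetUp() points the inherited ioctx at a per-test
 * namespace, tests may reuse object names without interfering:
 *
 *   TEST_F(RadosTest, WritesStayInOwnNamespace) {
 *     ASSERT_EQ(0, rados_write_full(ioctx, "obj", "x", 1));
 *   }
 */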
class RadosTest : public ::testing::Test {
public:
RadosTest(bool c=false) : cleanup(c) {}
~RadosTest() override {}
protected:
static void SetUpTestCase();
static void TearDownTestCase();
static void cleanup_default_namespace(rados_ioctx_t ioctx);
static void cleanup_namespace(rados_ioctx_t ioctx, std::string ns);
static rados_t s_cluster;
static std::string pool_name;
void SetUp() override;
void TearDown() override;
rados_t cluster = nullptr;
rados_ioctx_t ioctx = nullptr;
std::string nspace;
bool cleanup;
};
class RadosTestEC : public RadosTest {
public:
RadosTestEC(bool c=false) : cleanup(c) {}
~RadosTestEC() override {}
protected:
static void SetUpTestCase();
static void TearDownTestCase();
static rados_t s_cluster;
static std::string pool_name;
void SetUp() override;
void TearDown() override;
rados_t cluster = nullptr;
rados_ioctx_t ioctx = nullptr;
bool cleanup;
std::string nspace;
uint64_t alignment = 0;
};
/**
* Test case without creating a temporary pool in advance.
 * This is necessary for scenarios in which we need to manually
 * create a pool, start some long-running tasks, and then have the
 * related pool suddenly disappear.
*/
class RadosTestNP: public ::testing::Test {
public:
RadosTestNP() {}
~RadosTestNP() override {}
};
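/*
 * Usage sketch (not part of the original header; the pool name and the
 * helpers from test/librados/test.h are illustrative assumptions). The
 * test owns the pool lifetime, so it can delete the pool while work is
 * still in flight:
 *
 *   TEST_F(RadosTestNP, DeletePoolWhileBusy) {
 *     rados_t cluster;
 *     ASSERT_EQ("", connect_cluster(&cluster));
 *     ASSERT_EQ(0, rados_pool_create(cluster, "np-test-pool"));
 *     // ... kick off long-running operations against the pool ...
 *     ASSERT_EQ(0, rados_pool_delete(cluster, "np-test-pool"));
 *     rados_shutdown(cluster);
 *   }
 */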
#endif
| 3,255 | 25.048 | 71 | h |
null | ceph-main/src/test/librados/test.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_TEST_RADOS_API_TEST_H
#define CEPH_TEST_RADOS_API_TEST_H
#include "include/rados/librados.h"
#include "test/librados/test_shared.h"
#include <map>
#include <string>
#include <unistd.h>
std::string create_one_pool(const std::string &pool_name, rados_t *cluster,
uint32_t pg_num=0);
std::string create_one_ec_pool(const std::string &pool_name, rados_t *cluster);
std::string connect_cluster(rados_t *cluster);
int destroy_one_pool(const std::string &pool_name, rados_t *cluster);
int destroy_one_ec_pool(const std::string &pool_name, rados_t *cluster);
#endif
| 988 | 28.969697 | 79 | h |
null | ceph-main/src/test/librados/test_cxx.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "include/rados/librados.hpp"
#include "test/librados/test_shared.h"
std::string create_one_pool_pp(const std::string &pool_name,
librados::Rados &cluster);
std::string create_one_pool_pp(const std::string &pool_name,
librados::Rados &cluster,
const std::map<std::string, std::string> &config);
std::string create_one_ec_pool_pp(const std::string &pool_name,
librados::Rados &cluster);
std::string connect_cluster_pp(librados::Rados &cluster);
std::string connect_cluster_pp(librados::Rados &cluster,
const std::map<std::string, std::string> &config);
int destroy_one_pool_pp(const std::string &pool_name, librados::Rados &cluster);
int destroy_one_ec_pool_pp(const std::string &pool_name, librados::Rados &cluster);
| 882 | 43.15 | 83 | h |
null | ceph-main/src/test/librados/test_shared.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*
// vim: ts=8 sw=2 smarttab
#pragma once
#include <unistd.h>
#include <chrono>
#include <map>
#include <string>
#include <thread>
#include "include/buffer_fwd.h"
// helpers shared by librados tests
std::string get_temp_pool_name(const std::string &prefix = "test-rados-api-");
void assert_eq_sparse(ceph::bufferlist& expected,
const std::map<uint64_t, uint64_t>& extents,
ceph::bufferlist& actual);
class TestAlarm
{
public:
#ifndef _WIN32
TestAlarm() {
alarm(2400);
}
~TestAlarm() {
alarm(0);
}
#else
// TODO: add a timeout mechanism for Windows as well, possibly by using
// CreateTimerQueueTimer.
TestAlarm() {
}
~TestAlarm() {
}
#endif
};
template<class Rep, class Period, typename Func, typename... Args,
typename Return = std::result_of_t<Func&&(Args&&...)>>
Return wait_until(const std::chrono::duration<Rep, Period>& rel_time,
const std::chrono::duration<Rep, Period>& step,
const Return& expected,
Func&& func, Args&&... args)
{
std::this_thread::sleep_for(rel_time - step);
for (auto& s : {step, step}) {
if (!s.count()) {
break;
}
auto ret = func(std::forward<Args>(args)...);
if (ret == expected) {
return ret;
}
std::this_thread::sleep_for(s);
}
return func(std::forward<Args>(args)...);
}
| 1,456 | 23.694915 | 78 | h |
null | ceph-main/src/test/librados/testcase_cxx.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "gtest/gtest.h"
#include "include/rados/librados.hpp"
class RadosTestPPNS : public ::testing::Test {
public:
RadosTestPPNS(bool c=false) : cluster(s_cluster), cleanup(c) {}
~RadosTestPPNS() override {}
protected:
static void SetUpTestCase();
static void TearDownTestCase();
static void cleanup_all_objects(librados::IoCtx ioctx);
static librados::Rados s_cluster;
static std::string pool_name;
void SetUp() override;
void TearDown() override;
librados::Rados &cluster;
librados::IoCtx ioctx;
bool cleanup;
};
struct RadosTestPPNSCleanup : public RadosTestPPNS {
RadosTestPPNSCleanup() : RadosTestPPNS(true) {}
};
class RadosTestParamPPNS : public ::testing::TestWithParam<const char*> {
public:
RadosTestParamPPNS(bool c=false) : cluster(s_cluster), cleanup(c) {}
~RadosTestParamPPNS() override {}
static void SetUpTestCase();
static void TearDownTestCase();
protected:
static void cleanup_all_objects(librados::IoCtx ioctx);
static librados::Rados s_cluster;
static std::string pool_name;
static std::string cache_pool_name;
void SetUp() override;
void TearDown() override;
librados::Rados &cluster;
librados::IoCtx ioctx;
bool cleanup;
};
class RadosTestECPPNS : public RadosTestPPNS {
public:
RadosTestECPPNS(bool c=false) : cluster(s_cluster), cleanup(c) {}
~RadosTestECPPNS() override {}
protected:
static void SetUpTestCase();
static void TearDownTestCase();
static librados::Rados s_cluster;
static std::string pool_name;
void SetUp() override;
void TearDown() override;
librados::Rados &cluster;
librados::IoCtx ioctx;
uint64_t alignment = 0;
bool cleanup;
};
struct RadosTestECPPNSCleanup : public RadosTestECPPNS {
RadosTestECPPNSCleanup() : RadosTestECPPNS(true) {}
};
class RadosTestPP : public ::testing::Test {
public:
RadosTestPP(bool c=false) : cluster(s_cluster), cleanup(c) {}
~RadosTestPP() override {}
protected:
static void SetUpTestCase();
static void TearDownTestCase();
static void cleanup_default_namespace(librados::IoCtx ioctx);
static void cleanup_namespace(librados::IoCtx ioctx, std::string ns);
static librados::Rados s_cluster;
static std::string pool_name;
void SetUp() override;
void TearDown() override;
librados::Rados &cluster;
librados::IoCtx ioctx;
bool cleanup;
std::string nspace;
};
class RadosTestParamPP : public ::testing::TestWithParam<const char*> {
public:
RadosTestParamPP(bool c=false) : cluster(s_cluster), cleanup(c) {}
~RadosTestParamPP() override {}
static void SetUpTestCase();
static void TearDownTestCase();
protected:
static void cleanup_default_namespace(librados::IoCtx ioctx);
static void cleanup_namespace(librados::IoCtx ioctx, std::string ns);
static librados::Rados s_cluster;
static std::string pool_name;
static std::string cache_pool_name;
void SetUp() override;
void TearDown() override;
librados::Rados &cluster;
librados::IoCtx ioctx;
bool cleanup;
std::string nspace;
};
class RadosTestECPP : public RadosTestPP {
public:
RadosTestECPP(bool c=false) : cluster(s_cluster), cleanup(c) {}
~RadosTestECPP() override {}
protected:
static void SetUpTestCase();
static void TearDownTestCase();
static librados::Rados s_cluster;
static std::string pool_name;
void SetUp() override;
void TearDown() override;
librados::Rados &cluster;
librados::IoCtx ioctx;
bool cleanup;
std::string nspace;
uint64_t alignment = 0;
};
| 3,580 | 26.335878 | 73 | h |
null | ceph-main/src/test/librados_test_stub/LibradosTestStub.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRADOS_TEST_STUB_H
#define LIBRADOS_TEST_STUB_H
#include "include/rados/librados_fwd.hpp"
#include <boost/shared_ptr.hpp>
namespace neorados {
struct IOContext;
struct RADOS;
} // namespace neorados
namespace librados {
class MockTestMemIoCtxImpl;
class MockTestMemRadosClient;
class TestCluster;
class TestClassHandler;
MockTestMemIoCtxImpl &get_mock_io_ctx(IoCtx &ioctx);
MockTestMemIoCtxImpl &get_mock_io_ctx(neorados::RADOS& rados,
neorados::IOContext& io_context);
MockTestMemRadosClient &get_mock_rados_client(neorados::RADOS& rados);
} // namespace librados
namespace librados_test_stub {
typedef boost::shared_ptr<librados::TestCluster> TestClusterRef;
void set_cluster(TestClusterRef cluster);
TestClusterRef get_cluster();
librados::TestClassHandler* get_class_handler();
} // namespace librados_test_stub
#endif // LIBRADOS_TEST_STUB_H
| 1,008 | 22.465116 | 71 | h |
null | ceph-main/src/test/librados_test_stub/MockTestMemCluster.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRADOS_MOCK_TEST_MEM_CLUSTER_H
#define LIBRADOS_MOCK_TEST_MEM_CLUSTER_H
#include "include/common_fwd.h"
#include "test/librados_test_stub/TestMemCluster.h"
#include "test/librados_test_stub/MockTestMemRadosClient.h"
#include "gmock/gmock.h"
namespace librados {
class TestRadosClient;
class MockTestMemCluster : public TestMemCluster {
public:
MockTestMemCluster() {
default_to_dispatch();
}
MOCK_METHOD1(create_rados_client, TestRadosClient*(CephContext*));
MockTestMemRadosClient* do_create_rados_client(CephContext *cct) {
return new ::testing::NiceMock<MockTestMemRadosClient>(cct, this);
}
void default_to_dispatch() {
using namespace ::testing;
ON_CALL(*this, create_rados_client(_)).WillByDefault(Invoke(this, &MockTestMemCluster::do_create_rados_client));
}
};
} // namespace librados
#endif // LIBRADOS_MOCK_TEST_MEM_CLUSTER_H
| 984 | 25.621622 | 116 | h |
null | ceph-main/src/test/librados_test_stub/MockTestMemIoCtxImpl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRADOS_TEST_STUB_MOCK_TEST_MEM_IO_CTX_IMPL_H
#define LIBRADOS_TEST_STUB_MOCK_TEST_MEM_IO_CTX_IMPL_H
#include "test/librados_test_stub/TestMemIoCtxImpl.h"
#include "test/librados_test_stub/TestMemCluster.h"
#include "gmock/gmock.h"
namespace librados {
class MockTestMemRadosClient;
class MockTestMemIoCtxImpl : public TestMemIoCtxImpl {
public:
MockTestMemIoCtxImpl(MockTestMemRadosClient *mock_client,
TestMemRadosClient *client, int64_t pool_id,
const std::string& pool_name,
TestMemCluster::Pool *pool)
: TestMemIoCtxImpl(client, pool_id, pool_name, pool),
m_mock_client(mock_client), m_client(client) {
default_to_parent();
}
MockTestMemRadosClient *get_mock_rados_client() {
return m_mock_client;
}
MOCK_METHOD0(clone, TestIoCtxImpl*());
TestIoCtxImpl *do_clone() {
TestIoCtxImpl *io_ctx_impl = new ::testing::NiceMock<MockTestMemIoCtxImpl>(
m_mock_client, m_client, get_pool_id(), get_pool_name(), get_pool());
io_ctx_impl->set_snap_read(get_snap_read());
io_ctx_impl->set_snap_context(get_snap_context());
return io_ctx_impl;
}
MOCK_METHOD5(aio_notify, void(const std::string& o, AioCompletionImpl *c,
bufferlist& bl, uint64_t timeout_ms,
bufferlist *pbl));
void do_aio_notify(const std::string& o, AioCompletionImpl *c, bufferlist& bl,
uint64_t timeout_ms, bufferlist *pbl) {
return TestMemIoCtxImpl::aio_notify(o, c, bl, timeout_ms, pbl);
}
MOCK_METHOD6(aio_operate, int(const std::string&, TestObjectOperationImpl&,
AioCompletionImpl*, SnapContext*,
const ceph::real_time*, int));
int do_aio_operate(const std::string& o, TestObjectOperationImpl& ops,
AioCompletionImpl* c, SnapContext* snapc,
const ceph::real_time* pmtime, int flags) {
return TestMemIoCtxImpl::aio_operate(o, ops, c, snapc, pmtime, flags);
}
MOCK_METHOD4(aio_watch, int(const std::string& o, AioCompletionImpl *c,
uint64_t *handle, librados::WatchCtx2 *ctx));
int do_aio_watch(const std::string& o, AioCompletionImpl *c,
uint64_t *handle, librados::WatchCtx2 *ctx) {
return TestMemIoCtxImpl::aio_watch(o, c, handle, ctx);
}
MOCK_METHOD2(aio_unwatch, int(uint64_t handle, AioCompletionImpl *c));
int do_aio_unwatch(uint64_t handle, AioCompletionImpl *c) {
return TestMemIoCtxImpl::aio_unwatch(handle, c);
}
MOCK_METHOD2(assert_exists, int(const std::string &, uint64_t));
int do_assert_exists(const std::string &oid, uint64_t snap_id) {
return TestMemIoCtxImpl::assert_exists(oid, snap_id);
}
MOCK_METHOD2(assert_version, int(const std::string &, uint64_t));
int do_assert_version(const std::string &oid, uint64_t ver) {
return TestMemIoCtxImpl::assert_version(oid, ver);
}
MOCK_METHOD3(create, int(const std::string&, bool, const SnapContext &));
int do_create(const std::string& oid, bool exclusive,
const SnapContext &snapc) {
return TestMemIoCtxImpl::create(oid, exclusive, snapc);
}
MOCK_METHOD4(cmpext, int(const std::string&, uint64_t, bufferlist&,
uint64_t snap_id));
int do_cmpext(const std::string& oid, uint64_t off, bufferlist& cmp_bl,
uint64_t snap_id) {
return TestMemIoCtxImpl::cmpext(oid, off, cmp_bl, snap_id);
}
MOCK_METHOD8(exec, int(const std::string& oid,
TestClassHandler *handler,
const char *cls,
const char *method,
bufferlist& inbl,
bufferlist* outbl,
uint64_t snap_id,
const SnapContext &snapc));
int do_exec(const std::string& oid, TestClassHandler *handler,
const char *cls, const char *method, bufferlist& inbl,
bufferlist* outbl, uint64_t snap_id, const SnapContext &snapc) {
return TestMemIoCtxImpl::exec(oid, handler, cls, method, inbl, outbl,
snap_id, snapc);
}
MOCK_CONST_METHOD0(get_instance_id, uint64_t());
uint64_t do_get_instance_id() const {
return TestMemIoCtxImpl::get_instance_id();
}
MOCK_METHOD2(list_snaps, int(const std::string& o, snap_set_t *out_snaps));
int do_list_snaps(const std::string& o, snap_set_t *out_snaps) {
return TestMemIoCtxImpl::list_snaps(o, out_snaps);
}
MOCK_METHOD2(list_watchers, int(const std::string& o,
std::list<obj_watch_t> *out_watchers));
int do_list_watchers(const std::string& o,
std::list<obj_watch_t> *out_watchers) {
return TestMemIoCtxImpl::list_watchers(o, out_watchers);
}
MOCK_METHOD4(notify, int(const std::string& o, bufferlist& bl,
uint64_t timeout_ms, bufferlist *pbl));
int do_notify(const std::string& o, bufferlist& bl,
uint64_t timeout_ms, bufferlist *pbl) {
return TestMemIoCtxImpl::notify(o, bl, timeout_ms, pbl);
}
MOCK_METHOD1(set_snap_read, void(snap_t));
void do_set_snap_read(snap_t snap_id) {
return TestMemIoCtxImpl::set_snap_read(snap_id);
}
MOCK_METHOD6(sparse_read, int(const std::string& oid,
uint64_t off,
uint64_t len,
std::map<uint64_t, uint64_t> *m,
bufferlist *bl, uint64_t));
int do_sparse_read(const std::string& oid, uint64_t off, size_t len,
std::map<uint64_t, uint64_t> *m, bufferlist *bl,
uint64_t snap_id) {
return TestMemIoCtxImpl::sparse_read(oid, off, len, m, bl, snap_id);
}
MOCK_METHOD6(read, int(const std::string& oid,
size_t len,
uint64_t off,
bufferlist *bl, uint64_t snap_id, uint64_t* objver));
int do_read(const std::string& oid, size_t len, uint64_t off,
bufferlist *bl, uint64_t snap_id, uint64_t* objver) {
return TestMemIoCtxImpl::read(oid, len, off, bl, snap_id, objver);
}
MOCK_METHOD2(remove, int(const std::string& oid, const SnapContext &snapc));
int do_remove(const std::string& oid, const SnapContext &snapc) {
return TestMemIoCtxImpl::remove(oid, snapc);
}
MOCK_METHOD1(selfmanaged_snap_create, int(uint64_t *snap_id));
int do_selfmanaged_snap_create(uint64_t *snap_id) {
return TestMemIoCtxImpl::selfmanaged_snap_create(snap_id);
}
MOCK_METHOD1(selfmanaged_snap_remove, int(uint64_t snap_id));
int do_selfmanaged_snap_remove(uint64_t snap_id) {
return TestMemIoCtxImpl::selfmanaged_snap_remove(snap_id);
}
MOCK_METHOD2(selfmanaged_snap_rollback, int(const std::string& oid,
uint64_t snap_id));
int do_selfmanaged_snap_rollback(const std::string& oid, uint64_t snap_id) {
return TestMemIoCtxImpl::selfmanaged_snap_rollback(oid, snap_id);
}
MOCK_METHOD3(truncate, int(const std::string& oid,
uint64_t size,
const SnapContext &snapc));
int do_truncate(const std::string& oid, uint64_t size,
const SnapContext &snapc) {
return TestMemIoCtxImpl::truncate(oid, size, snapc);
}
MOCK_METHOD5(write, int(const std::string& oid, bufferlist& bl, size_t len,
uint64_t off, const SnapContext &snapc));
int do_write(const std::string& oid, bufferlist& bl, size_t len, uint64_t off,
const SnapContext &snapc) {
return TestMemIoCtxImpl::write(oid, bl, len, off, snapc);
}
MOCK_METHOD3(write_full, int(const std::string& oid,
bufferlist& bl,
const SnapContext &snapc));
int do_write_full(const std::string& oid, bufferlist& bl,
const SnapContext &snapc) {
return TestMemIoCtxImpl::write_full(oid, bl, snapc);
}
MOCK_METHOD5(writesame, int(const std::string& oid, bufferlist& bl,
size_t len, uint64_t off,
const SnapContext &snapc));
int do_writesame(const std::string& oid, bufferlist& bl, size_t len,
uint64_t off, const SnapContext &snapc) {
return TestMemIoCtxImpl::writesame(oid, bl, len, off, snapc);
}
MOCK_METHOD4(zero, int(const std::string& oid, uint64_t offset,
uint64_t length, const SnapContext &snapc));
int do_zero(const std::string& oid, uint64_t offset,
uint64_t length, const SnapContext &snapc) {
return TestMemIoCtxImpl::zero(oid, offset, length, snapc);
}
void default_to_parent() {
using namespace ::testing;
ON_CALL(*this, clone()).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_clone));
ON_CALL(*this, aio_notify(_, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_aio_notify));
ON_CALL(*this, aio_operate(_, _, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_aio_operate));
ON_CALL(*this, aio_watch(_, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_aio_watch));
ON_CALL(*this, aio_unwatch(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_aio_unwatch));
ON_CALL(*this, assert_exists(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_assert_exists));
ON_CALL(*this, assert_version(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_assert_version));
ON_CALL(*this, create(_, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_create));
ON_CALL(*this, cmpext(_, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_cmpext));
ON_CALL(*this, exec(_, _, _, _, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_exec));
ON_CALL(*this, get_instance_id()).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_get_instance_id));
ON_CALL(*this, list_snaps(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_list_snaps));
ON_CALL(*this, list_watchers(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_list_watchers));
ON_CALL(*this, notify(_, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_notify));
ON_CALL(*this, read(_, _, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_read));
ON_CALL(*this, set_snap_read(_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_set_snap_read));
ON_CALL(*this, sparse_read(_, _, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_sparse_read));
ON_CALL(*this, remove(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_remove));
ON_CALL(*this, selfmanaged_snap_create(_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_selfmanaged_snap_create));
ON_CALL(*this, selfmanaged_snap_remove(_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_selfmanaged_snap_remove));
ON_CALL(*this, selfmanaged_snap_rollback(_, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_selfmanaged_snap_rollback));
ON_CALL(*this, truncate(_,_,_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_truncate));
ON_CALL(*this, write(_, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_write));
ON_CALL(*this, write_full(_, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_write_full));
ON_CALL(*this, writesame(_, _, _, _, _)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_writesame));
ON_CALL(*this, zero(_,_,_,_)).WillByDefault(Invoke(this, &MockTestMemIoCtxImpl::do_zero));
}
private:
MockTestMemRadosClient *m_mock_client;
TestMemRadosClient *m_client;
};
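// Illustrative only: every mocked call defaults to the in-memory parent
// implementation (see default_to_parent()), so tests typically override just
// the calls they care about, e.g. injecting a write failure (variable name
// below is hypothetical):
//
//   EXPECT_CALL(mock_io_ctx, write(_, _, _, _, _))
//     .WillOnce(Return(-EPERM));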
} // namespace librados
#endif // LIBRADOS_TEST_STUB_MOCK_TEST_MEM_IO_CTX_IMPL_H
| 12,014 | 46.490119 | 133 | h |
null | ceph-main/src/test/librados_test_stub/TestClassHandler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_CLASS_HANDLER_H
#define CEPH_TEST_CLASS_HANDLER_H
#include "objclass/objclass.h"
#include "common/snap_types.h"
#include <boost/shared_ptr.hpp>
#include <list>
#include <map>
#include <string>
namespace librados
{
class TestIoCtxImpl;
class TestClassHandler {
public:
TestClassHandler();
~TestClassHandler();
struct MethodContext {
~MethodContext();
TestIoCtxImpl *io_ctx_impl;
std::string oid;
uint64_t snap_id;
SnapContext snapc;
};
typedef boost::shared_ptr<MethodContext> SharedMethodContext;
struct Method {
cls_method_cxx_call_t class_call;
};
typedef boost::shared_ptr<Method> SharedMethod;
typedef std::map<std::string, SharedMethod> Methods;
typedef std::map<std::string, cls_cxx_filter_factory_t> Filters;
struct Class {
Methods methods;
Filters filters;
};
typedef boost::shared_ptr<Class> SharedClass;
void open_all_classes();
int create(const std::string &name, cls_handle_t *handle);
int create_method(cls_handle_t hclass, const char *method,
cls_method_cxx_call_t class_call,
cls_method_handle_t *handle);
cls_method_cxx_call_t get_method(const std::string &cls,
const std::string &method);
SharedMethodContext get_method_context(TestIoCtxImpl *io_ctx_impl,
const std::string &oid,
uint64_t snap_id,
const SnapContext &snapc);
int create_filter(cls_handle_t hclass, const std::string& filter_name,
cls_cxx_filter_factory_t fn);
private:
typedef std::map<std::string, SharedClass> Classes;
typedef std::list<void*> ClassHandles;
Classes m_classes;
ClassHandles m_class_handles;
Filters m_filters;
void open_class(const std::string& name, const std::string& path);
};
} // namespace librados
#endif // CEPH_TEST_CLASS_HANDLER_H
| 2,063 | 24.8 | 72 | h |
null | ceph-main/src/test/librados_test_stub/TestCluster.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_CLUSTER_H
#define CEPH_TEST_CLUSTER_H
#include "test/librados_test_stub/TestWatchNotify.h"
#include "include/common_fwd.h"
namespace librados {
class TestRadosClient;
class TestWatchNotify;
class TestCluster {
public:
struct ObjectLocator {
std::string nspace;
std::string name;
ObjectLocator(const std::string& nspace, const std::string& name)
: nspace(nspace), name(name) {
}
bool operator<(const ObjectLocator& rhs) const {
if (nspace != rhs.nspace) {
return nspace < rhs.nspace;
}
return name < rhs.name;
}
};
struct ObjectHandler {
virtual ~ObjectHandler() {}
virtual void handle_removed(TestRadosClient* test_rados_client) = 0;
};
TestCluster() : m_watch_notify(this) {
}
virtual ~TestCluster() {
}
virtual TestRadosClient *create_rados_client(CephContext *cct) = 0;
virtual int register_object_handler(int64_t pool_id,
const ObjectLocator& locator,
ObjectHandler* object_handler) = 0;
virtual void unregister_object_handler(int64_t pool_id,
const ObjectLocator& locator,
ObjectHandler* object_handler) = 0;
TestWatchNotify *get_watch_notify() {
return &m_watch_notify;
}
protected:
TestWatchNotify m_watch_notify;
};
} // namespace librados
#endif // CEPH_TEST_CLUSTER_H
| 1,565 | 23.092308 | 76 | h |
null | ceph-main/src/test/librados_test_stub/TestIoCtxImpl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_IO_CTX_IMPL_H
#define CEPH_TEST_IO_CTX_IMPL_H
#include <list>
#include <atomic>
#include <boost/function.hpp>
#include "include/rados/librados.hpp"
#include "include/Context.h"
#include "common/snap_types.h"
namespace librados {
class TestClassHandler;
class TestIoCtxImpl;
class TestRadosClient;
typedef boost::function<int(TestIoCtxImpl*,
const std::string&,
bufferlist *,
uint64_t,
const SnapContext &,
uint64_t*)> ObjectOperationTestImpl;
typedef std::list<ObjectOperationTestImpl> ObjectOperations;
struct TestObjectOperationImpl {
public:
void get();
void put();
ObjectOperations ops;
private:
std::atomic<uint64_t> m_refcount = { 0 };
};
class TestIoCtxImpl {
public:
typedef boost::function<int(TestIoCtxImpl *, const std::string &)> Operation;
TestIoCtxImpl();
explicit TestIoCtxImpl(TestRadosClient *client, int64_t m_pool_id,
const std::string& pool_name);
TestRadosClient *get_rados_client() {
return m_client;
}
void get();
void put();
inline int64_t get_pool_id() const {
return m_pool_id;
}
virtual TestIoCtxImpl *clone() = 0;
virtual uint64_t get_instance_id() const;
virtual int64_t get_id();
virtual uint64_t get_last_version();
virtual std::string get_pool_name();
inline void set_namespace(const std::string& namespace_name) {
m_namespace_name = namespace_name;
}
inline std::string get_namespace() const {
return m_namespace_name;
}
snap_t get_snap_read() const {
return m_snap_seq;
}
inline void set_snap_context(const SnapContext& snapc) {
m_snapc = snapc;
}
const SnapContext &get_snap_context() const {
return m_snapc;
}
virtual int aio_flush();
virtual void aio_flush_async(AioCompletionImpl *c);
virtual void aio_notify(const std::string& oid, AioCompletionImpl *c,
bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl);
virtual int aio_operate(const std::string& oid, TestObjectOperationImpl &ops,
AioCompletionImpl *c, SnapContext *snap_context,
const ceph::real_time *pmtime, int flags);
virtual int aio_operate_read(const std::string& oid, TestObjectOperationImpl &ops,
AioCompletionImpl *c, int flags,
bufferlist *pbl, uint64_t snap_id,
uint64_t* objver);
virtual int aio_remove(const std::string& oid, AioCompletionImpl *c,
int flags = 0) = 0;
virtual int aio_watch(const std::string& o, AioCompletionImpl *c,
uint64_t *handle, librados::WatchCtx2 *ctx);
virtual int aio_unwatch(uint64_t handle, AioCompletionImpl *c);
virtual int append(const std::string& oid, const bufferlist &bl,
const SnapContext &snapc) = 0;
virtual int assert_exists(const std::string &oid, uint64_t snap_id) = 0;
virtual int assert_version(const std::string &oid, uint64_t ver) = 0;
virtual int create(const std::string& oid, bool exclusive,
const SnapContext &snapc) = 0;
virtual int exec(const std::string& oid, TestClassHandler *handler,
const char *cls, const char *method,
bufferlist& inbl, bufferlist* outbl,
uint64_t snap_id, const SnapContext &snapc);
virtual int list_snaps(const std::string& o, snap_set_t *out_snaps) = 0;
virtual int list_watchers(const std::string& o,
std::list<obj_watch_t> *out_watchers);
virtual int notify(const std::string& o, bufferlist& bl,
uint64_t timeout_ms, bufferlist *pbl);
virtual void notify_ack(const std::string& o, uint64_t notify_id,
uint64_t handle, bufferlist& bl);
virtual int omap_get_vals(const std::string& oid,
const std::string& start_after,
const std::string &filter_prefix,
uint64_t max_return,
std::map<std::string, bufferlist> *out_vals) = 0;
virtual int omap_get_vals2(const std::string& oid,
const std::string& start_after,
const std::string &filter_prefix,
uint64_t max_return,
std::map<std::string, bufferlist> *out_vals,
bool *pmore) = 0;
virtual int omap_rm_keys(const std::string& oid,
const std::set<std::string>& keys) = 0;
virtual int omap_set(const std::string& oid,
const std::map<std::string, bufferlist> &map) = 0;
virtual int operate(const std::string& oid, TestObjectOperationImpl &ops);
virtual int operate_read(const std::string& oid, TestObjectOperationImpl &ops,
bufferlist *pbl);
virtual int read(const std::string& oid, size_t len, uint64_t off,
bufferlist *bl, uint64_t snap_id, uint64_t* objver) = 0;
virtual int remove(const std::string& oid, const SnapContext &snapc) = 0;
virtual int selfmanaged_snap_create(uint64_t *snapid) = 0;
virtual void aio_selfmanaged_snap_create(uint64_t *snapid,
AioCompletionImpl *c);
virtual int selfmanaged_snap_remove(uint64_t snapid) = 0;
virtual void aio_selfmanaged_snap_remove(uint64_t snapid,
AioCompletionImpl *c);
virtual int selfmanaged_snap_rollback(const std::string& oid,
uint64_t snapid) = 0;
virtual int selfmanaged_snap_set_write_ctx(snap_t seq,
std::vector<snap_t>& snaps);
virtual int set_alloc_hint(const std::string& oid,
uint64_t expected_object_size,
uint64_t expected_write_size,
uint32_t flags,
const SnapContext &snapc);
virtual void set_snap_read(snap_t seq);
virtual int sparse_read(const std::string& oid, uint64_t off, uint64_t len,
std::map<uint64_t,uint64_t> *m,
bufferlist *data_bl, uint64_t snap_id) = 0;
virtual int stat(const std::string& oid, uint64_t *psize, time_t *pmtime) = 0;
virtual int truncate(const std::string& oid, uint64_t size,
const SnapContext &snapc) = 0;
virtual int tmap_update(const std::string& oid, bufferlist& cmdbl);
virtual int unwatch(uint64_t handle);
virtual int watch(const std::string& o, uint64_t *handle,
librados::WatchCtx *ctx, librados::WatchCtx2 *ctx2);
virtual int write(const std::string& oid, bufferlist& bl, size_t len,
uint64_t off, const SnapContext &snapc) = 0;
virtual int write_full(const std::string& oid, bufferlist& bl,
const SnapContext &snapc) = 0;
virtual int writesame(const std::string& oid, bufferlist& bl, size_t len,
uint64_t off, const SnapContext &snapc) = 0;
virtual int cmpext(const std::string& oid, uint64_t off, bufferlist& cmp_bl,
uint64_t snap_id) = 0;
virtual int xattr_get(const std::string& oid,
std::map<std::string, bufferlist>* attrset) = 0;
virtual int xattr_set(const std::string& oid, const std::string &name,
bufferlist& bl) = 0;
virtual int zero(const std::string& oid, uint64_t off, uint64_t len,
const SnapContext &snapc) = 0;
int execute_operation(const std::string& oid,
const Operation &operation);
protected:
TestIoCtxImpl(const TestIoCtxImpl& rhs);
virtual ~TestIoCtxImpl();
int execute_aio_operations(const std::string& oid,
TestObjectOperationImpl *ops,
bufferlist *pbl, uint64_t,
const SnapContext &snapc,
uint64_t* objver);
private:
struct C_AioNotify : public Context {
TestIoCtxImpl *io_ctx;
AioCompletionImpl *aio_comp;
C_AioNotify(TestIoCtxImpl *_io_ctx, AioCompletionImpl *_aio_comp)
: io_ctx(_io_ctx), aio_comp(_aio_comp) {
}
void finish(int r) override {
io_ctx->handle_aio_notify_complete(aio_comp, r);
}
};
TestRadosClient *m_client;
int64_t m_pool_id = 0;
std::string m_pool_name;
std::string m_namespace_name;
snap_t m_snap_seq = 0;
SnapContext m_snapc;
std::atomic<uint64_t> m_refcount = { 0 };
std::atomic<uint64_t> m_pending_ops = { 0 };
void handle_aio_notify_complete(AioCompletionImpl *aio_comp, int r);
};
} // namespace librados
#endif // CEPH_TEST_IO_CTX_IMPL_H
| 8,907 | 39.126126 | 84 | h |
null | ceph-main/src/test/librados_test_stub/TestMemCluster.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_MEM_CLUSTER_H
#define CEPH_TEST_MEM_CLUSTER_H
#include "test/librados_test_stub/TestCluster.h"
#include "include/buffer.h"
#include "include/interval_set.h"
#include "include/int_types.h"
#include "common/ceph_mutex.h"
#include "common/RefCountedObj.h"
#include <boost/shared_ptr.hpp>
#include <list>
#include <map>
#include <set>
#include <string>
namespace librados {
class TestMemCluster : public TestCluster {
public:
typedef std::map<std::string, bufferlist> OMap;
typedef std::map<ObjectLocator, OMap> FileOMaps;
typedef std::map<ObjectLocator, bufferlist> FileTMaps;
typedef std::map<std::string, bufferlist> XAttrs;
typedef std::map<ObjectLocator, XAttrs> FileXAttrs;
typedef std::set<ObjectHandler*> ObjectHandlers;
typedef std::map<ObjectLocator, ObjectHandlers> FileHandlers;
struct File {
File();
File(const File &rhs);
bufferlist data;
time_t mtime;
uint64_t objver;
uint64_t snap_id;
std::vector<uint64_t> snaps;
interval_set<uint64_t> snap_overlap;
bool exists;
ceph::shared_mutex lock =
ceph::make_shared_mutex("TestMemCluster::File::lock");
};
typedef boost::shared_ptr<File> SharedFile;
typedef std::list<SharedFile> FileSnapshots;
typedef std::map<ObjectLocator, FileSnapshots> Files;
typedef std::set<uint64_t> SnapSeqs;
struct Pool : public RefCountedObject {
Pool();
int64_t pool_id = 0;
SnapSeqs snap_seqs;
uint64_t snap_id = 1;
ceph::shared_mutex file_lock =
ceph::make_shared_mutex("TestMemCluster::Pool::file_lock");
Files files;
FileOMaps file_omaps;
FileTMaps file_tmaps;
FileXAttrs file_xattrs;
FileHandlers file_handlers;
};
TestMemCluster();
~TestMemCluster() override;
TestRadosClient *create_rados_client(CephContext *cct) override;
int register_object_handler(int64_t pool_id, const ObjectLocator& locator,
ObjectHandler* object_handler) override;
void unregister_object_handler(int64_t pool_id, const ObjectLocator& locator,
ObjectHandler* object_handler) override;
int pool_create(const std::string &pool_name);
int pool_delete(const std::string &pool_name);
int pool_get_base_tier(int64_t pool_id, int64_t* base_tier);
int pool_list(std::list<std::pair<int64_t, std::string> >& v);
int64_t pool_lookup(const std::string &name);
int pool_reverse_lookup(int64_t id, std::string *name);
Pool *get_pool(int64_t pool_id);
Pool *get_pool(const std::string &pool_name);
void allocate_client(uint32_t *nonce, uint64_t *global_id);
void deallocate_client(uint32_t nonce);
bool is_blocklisted(uint32_t nonce) const;
void blocklist(uint32_t nonce);
void transaction_start(const ObjectLocator& locator);
void transaction_finish(const ObjectLocator& locator);
private:
typedef std::map<std::string, Pool*> Pools;
typedef std::set<uint32_t> Blocklist;
mutable ceph::mutex m_lock =
ceph::make_mutex("TestMemCluster::m_lock");
Pools m_pools;
int64_t m_pool_id = 0;
uint32_t m_next_nonce;
uint64_t m_next_global_id = 1234;
Blocklist m_blocklist;
ceph::condition_variable m_transaction_cond;
std::set<ObjectLocator> m_transactions;
Pool *get_pool(const ceph::mutex& lock, int64_t pool_id);
};
} // namespace librados
#endif // CEPH_TEST_MEM_CLUSTER_H
| 3,468 | 26.752 | 79 | h |
null | ceph-main/src/test/librados_test_stub/TestMemIoCtxImpl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_MEM_IO_CTX_IMPL_H
#define CEPH_TEST_MEM_IO_CTX_IMPL_H
#include "test/librados_test_stub/TestIoCtxImpl.h"
#include "test/librados_test_stub/TestMemCluster.h"
namespace librados {
class TestMemRadosClient;
class TestMemIoCtxImpl : public TestIoCtxImpl {
public:
TestMemIoCtxImpl();
TestMemIoCtxImpl(TestMemRadosClient *client, int64_t m_pool_id,
const std::string& pool_name,
TestMemCluster::Pool *pool);
~TestMemIoCtxImpl() override;
TestIoCtxImpl *clone() override;
int aio_remove(const std::string& oid, AioCompletionImpl *c, int flags = 0) override;
int append(const std::string& oid, const bufferlist &bl,
const SnapContext &snapc) override;
int assert_exists(const std::string &oid, uint64_t snap_id) override;
int assert_version(const std::string &oid, uint64_t ver) override;
int create(const std::string& oid, bool exclusive,
const SnapContext &snapc) override;
int list_snaps(const std::string& o, snap_set_t *out_snaps) override;
int omap_get_vals(const std::string& oid,
const std::string& start_after,
const std::string &filter_prefix,
uint64_t max_return,
std::map<std::string, bufferlist> *out_vals) override;
int omap_get_vals2(const std::string& oid,
const std::string& start_after,
const std::string &filter_prefix,
uint64_t max_return,
std::map<std::string, bufferlist> *out_vals,
bool *pmore) override;
int omap_rm_keys(const std::string& oid,
const std::set<std::string>& keys) override;
int omap_set(const std::string& oid, const std::map<std::string,
bufferlist> &map) override;
int read(const std::string& oid, size_t len, uint64_t off,
bufferlist *bl, uint64_t snap_id, uint64_t* objver) override;
int remove(const std::string& oid, const SnapContext &snapc) override;
int selfmanaged_snap_create(uint64_t *snapid) override;
int selfmanaged_snap_remove(uint64_t snapid) override;
int selfmanaged_snap_rollback(const std::string& oid,
uint64_t snapid) override;
int set_alloc_hint(const std::string& oid, uint64_t expected_object_size,
uint64_t expected_write_size, uint32_t flags,
const SnapContext &snapc) override;
int sparse_read(const std::string& oid, uint64_t off, uint64_t len,
std::map<uint64_t,uint64_t> *m, bufferlist *data_bl,
uint64_t snap_id) override;
int stat(const std::string& oid, uint64_t *psize, time_t *pmtime) override;
int truncate(const std::string& oid, uint64_t size,
const SnapContext &snapc) override;
int write(const std::string& oid, bufferlist& bl, size_t len,
uint64_t off, const SnapContext &snapc) override;
int write_full(const std::string& oid, bufferlist& bl,
const SnapContext &snapc) override;
int writesame(const std::string& oid, bufferlist& bl, size_t len,
uint64_t off, const SnapContext &snapc) override;
int cmpext(const std::string& oid, uint64_t off, bufferlist& cmp_bl,
uint64_t snap_id) override;
int xattr_get(const std::string& oid,
std::map<std::string, bufferlist>* attrset) override;
int xattr_set(const std::string& oid, const std::string &name,
bufferlist& bl) override;
int zero(const std::string& oid, uint64_t off, uint64_t len,
const SnapContext &snapc) override;
protected:
TestMemCluster::Pool *get_pool() {
return m_pool;
}
private:
TestMemIoCtxImpl(const TestMemIoCtxImpl&);
TestMemRadosClient *m_client = nullptr;
TestMemCluster::Pool *m_pool = nullptr;
void append_clone(bufferlist& src, bufferlist* dest);
size_t clip_io(size_t off, size_t len, size_t bl_len);
void ensure_minimum_length(size_t len, bufferlist *bl);
TestMemCluster::SharedFile get_file(const std::string &oid, bool write,
uint64_t snap_id,
const SnapContext &snapc);
};
} // namespace librados
#endif // CEPH_TEST_MEM_IO_CTX_IMPL_H
| 4,396 | 40.87619 | 87 | h |
null | ceph-main/src/test/librados_test_stub/TestMemRadosClient.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_MEM_RADOS_CLIENT_H
#define CEPH_TEST_MEM_RADOS_CLIENT_H
#include "test/librados_test_stub/TestRadosClient.h"
#include "include/ceph_assert.h"
#include <list>
#include <string>
namespace librados {
class AioCompletionImpl;
class TestMemCluster;
class TestMemRadosClient : public TestRadosClient {
public:
TestMemRadosClient(CephContext *cct, TestMemCluster *test_mem_cluster);
~TestMemRadosClient() override;
TestIoCtxImpl *create_ioctx(int64_t pool_id,
const std::string &pool_name) override;
uint32_t get_nonce() override {
return m_nonce;
}
uint64_t get_instance_id() override {
return m_global_id;
}
int get_min_compatible_osd(int8_t* require_osd_release) override {
*require_osd_release = CEPH_RELEASE_OCTOPUS;
return 0;
}
int get_min_compatible_client(int8_t* min_compat_client,
int8_t* require_min_compat_client) override {
*min_compat_client = CEPH_RELEASE_MIMIC;
*require_min_compat_client = CEPH_RELEASE_MIMIC;
return 0;
}
void object_list(int64_t pool_id,
std::list<librados::TestRadosClient::Object> *list) override;
int service_daemon_register(const std::string& service,
const std::string& name,
const std::map<std::string,std::string>& metadata) override {
return 0;
}
int service_daemon_update_status(std::map<std::string,std::string>&& status) override {
return 0;
}
int pool_create(const std::string &pool_name) override;
int pool_delete(const std::string &pool_name) override;
int pool_get_base_tier(int64_t pool_id, int64_t* base_tier) override;
int pool_list(std::list<std::pair<int64_t, std::string> >& v) override;
int64_t pool_lookup(const std::string &name) override;
int pool_reverse_lookup(int64_t id, std::string *name) override;
int watch_flush() override;
bool is_blocklisted() const override;
int blocklist_add(const std::string& client_address,
uint32_t expire_seconds) override;
protected:
TestMemCluster *get_mem_cluster() {
return m_mem_cluster;
}
protected:
void transaction_start(const std::string& nspace,
const std::string &oid) override;
void transaction_finish(const std::string& nspace,
const std::string &oid) override;
private:
TestMemCluster *m_mem_cluster;
uint32_t m_nonce;
uint64_t m_global_id;
};
} // namespace librados
#endif // CEPH_TEST_MEM_RADOS_CLIENT_H
| 2,667 | 28.977528 | 91 | h |
null | ceph-main/src/test/librados_test_stub/TestRadosClient.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_RADOS_CLIENT_H
#define CEPH_TEST_RADOS_CLIENT_H
#include <map>
#include <memory>
#include <list>
#include <string>
#include <vector>
#include <atomic>
#include <boost/function.hpp>
#include <boost/functional/hash.hpp>
#include "include/rados/librados.hpp"
#include "common/config.h"
#include "common/config_obs.h"
#include "include/buffer_fwd.h"
#include "test/librados_test_stub/TestWatchNotify.h"
class Finisher;
namespace boost { namespace asio { struct io_context; }}
namespace ceph { namespace async { struct io_context_pool; }}
namespace librados {
class TestIoCtxImpl;
class TestRadosClient : public md_config_obs_t {
public:
static void Deallocate(librados::TestRadosClient* client)
{
client->put();
}
typedef boost::function<int()> AioFunction;
struct Object {
std::string oid;
std::string locator;
std::string nspace;
};
class Transaction {
public:
Transaction(TestRadosClient *rados_client, const std::string& nspace,
const std::string &oid)
: rados_client(rados_client), nspace(nspace), oid(oid) {
rados_client->transaction_start(nspace, oid);
}
~Transaction() {
rados_client->transaction_finish(nspace, oid);
}
private:
TestRadosClient *rados_client;
std::string nspace;
std::string oid;
};
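  // Illustrative only: callers scope a Transaction to serialize concurrent
  // access to a single object while mutating it, e.g.
  //
  //   {
  //     TestRadosClient::Transaction transaction(rados_client, nspace, oid);
  //     // ... operate on (nspace, oid) ...
  //   }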
TestRadosClient(CephContext *cct, TestWatchNotify *watch_notify);
void get();
void put();
virtual CephContext *cct();
virtual uint32_t get_nonce() = 0;
virtual uint64_t get_instance_id() = 0;
virtual int get_min_compatible_osd(int8_t* require_osd_release) = 0;
virtual int get_min_compatible_client(int8_t* min_compat_client,
int8_t* require_min_compat_client) = 0;
virtual int connect();
virtual void shutdown();
virtual int wait_for_latest_osdmap();
virtual TestIoCtxImpl *create_ioctx(int64_t pool_id,
const std::string &pool_name) = 0;
virtual int mon_command(const std::vector<std::string>& cmd,
const bufferlist &inbl,
bufferlist *outbl, std::string *outs);
virtual void object_list(int64_t pool_id,
std::list<librados::TestRadosClient::Object> *list) = 0;
virtual int service_daemon_register(const std::string& service,
const std::string& name,
const std::map<std::string,std::string>& metadata) = 0;
virtual int service_daemon_update_status(std::map<std::string,std::string>&& status) = 0;
virtual int pool_create(const std::string &pool_name) = 0;
virtual int pool_delete(const std::string &pool_name) = 0;
virtual int pool_get_base_tier(int64_t pool_id, int64_t* base_tier) = 0;
virtual int pool_list(std::list<std::pair<int64_t, std::string> >& v) = 0;
virtual int64_t pool_lookup(const std::string &name) = 0;
virtual int pool_reverse_lookup(int64_t id, std::string *name) = 0;
virtual int aio_watch_flush(AioCompletionImpl *c);
virtual int watch_flush() = 0;
virtual bool is_blocklisted() const = 0;
virtual int blocklist_add(const std::string& client_address,
uint32_t expire_seconds) = 0;
virtual int wait_for_latest_osd_map() {
return 0;
}
Finisher *get_aio_finisher() {
return m_aio_finisher;
}
TestWatchNotify *get_watch_notify() {
return m_watch_notify;
}
void add_aio_operation(const std::string& oid, bool queue_callback,
const AioFunction &aio_function, AioCompletionImpl *c);
void flush_aio_operations();
void flush_aio_operations(AioCompletionImpl *c);
void finish_aio_completion(AioCompletionImpl *c, int r);
boost::asio::io_context& get_io_context();
protected:
virtual ~TestRadosClient();
virtual void transaction_start(const std::string& nspace,
const std::string &oid) = 0;
virtual void transaction_finish(const std::string& nspace,
const std::string &oid) = 0;
const char** get_tracked_conf_keys() const override;
void handle_conf_change(const ConfigProxy& conf,
const std::set<std::string> &changed) override;
private:
struct IOContextPool;
CephContext *m_cct;
std::atomic<uint64_t> m_refcount = { 0 };
TestWatchNotify *m_watch_notify;
Finisher *get_finisher(const std::string& oid);
Finisher *m_aio_finisher;
std::vector<Finisher *> m_finishers;
boost::hash<std::string> m_hash;
std::unique_ptr<ceph::async::io_context_pool> m_io_context_pool;
};
} // namespace librados
#endif // CEPH_TEST_RADOS_CLIENT_H
| 4,728 | 28.01227 | 93 | h |
null | ceph-main/src/test/librados_test_stub/TestWatchNotify.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_WATCH_NOTIFY_H
#define CEPH_TEST_WATCH_NOTIFY_H
#include "include/rados/librados.hpp"
#include "common/AsyncOpTracker.h"
#include "common/ceph_mutex.h"
#include <boost/noncopyable.hpp>
#include <boost/shared_ptr.hpp>
#include <list>
#include <map>
class Finisher;
namespace librados {
class TestCluster;
class TestRadosClient;
class TestWatchNotify : boost::noncopyable {
public:
typedef std::pair<uint64_t, uint64_t> WatcherID;
typedef std::set<WatcherID> WatcherIDs;
typedef std::map<std::pair<uint64_t, uint64_t>, bufferlist> NotifyResponses;
struct NotifyHandle {
TestRadosClient *rados_client = nullptr;
WatcherIDs pending_watcher_ids;
NotifyResponses notify_responses;
bufferlist *pbl = nullptr;
Context *on_notify = nullptr;
};
typedef boost::shared_ptr<NotifyHandle> SharedNotifyHandle;
typedef std::map<uint64_t, SharedNotifyHandle> NotifyHandles;
struct WatchHandle {
TestRadosClient *rados_client = nullptr;
std::string addr;
uint32_t nonce;
uint64_t gid;
uint64_t handle;
librados::WatchCtx* watch_ctx;
librados::WatchCtx2* watch_ctx2;
};
typedef std::map<uint64_t, WatchHandle> WatchHandles;
struct ObjectHandler;
typedef boost::shared_ptr<ObjectHandler> SharedObjectHandler;
struct Watcher {
Watcher(int64_t pool_id, const std::string& nspace, const std::string& oid)
: pool_id(pool_id), nspace(nspace), oid(oid) {
}
int64_t pool_id;
std::string nspace;
std::string oid;
SharedObjectHandler object_handler;
WatchHandles watch_handles;
NotifyHandles notify_handles;
};
typedef boost::shared_ptr<Watcher> SharedWatcher;
TestWatchNotify(TestCluster* test_cluster);
int list_watchers(int64_t pool_id, const std::string& nspace,
const std::string& o, std::list<obj_watch_t> *out_watchers);
void aio_flush(TestRadosClient *rados_client, Context *on_finish);
void aio_watch(TestRadosClient *rados_client, int64_t pool_id,
const std::string& nspace, const std::string& o, uint64_t gid,
uint64_t *handle, librados::WatchCtx *watch_ctx,
librados::WatchCtx2 *watch_ctx2, Context *on_finish);
void aio_unwatch(TestRadosClient *rados_client, uint64_t handle,
Context *on_finish);
void aio_notify(TestRadosClient *rados_client, int64_t pool_id,
const std::string& nspace, const std::string& oid,
const bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl,
Context *on_notify);
void flush(TestRadosClient *rados_client);
int notify(TestRadosClient *rados_client, int64_t pool_id,
const std::string& nspace, const std::string& o, bufferlist& bl,
uint64_t timeout_ms, bufferlist *pbl);
void notify_ack(TestRadosClient *rados_client, int64_t pool_id,
const std::string& nspace, const std::string& o,
uint64_t notify_id, uint64_t handle, uint64_t gid,
bufferlist& bl);
int watch(TestRadosClient *rados_client, int64_t pool_id,
const std::string& nspace, const std::string& o, uint64_t gid,
uint64_t *handle, librados::WatchCtx *ctx,
librados::WatchCtx2 *ctx2);
int unwatch(TestRadosClient *rados_client, uint64_t handle);
void blocklist(uint32_t nonce);
private:
typedef std::tuple<int64_t, std::string, std::string> PoolFile;
typedef std::map<PoolFile, SharedWatcher> FileWatchers;
TestCluster *m_test_cluster;
uint64_t m_handle = 0;
uint64_t m_notify_id = 0;
ceph::mutex m_lock =
ceph::make_mutex("librados::TestWatchNotify::m_lock");
AsyncOpTracker m_async_op_tracker;
FileWatchers m_file_watchers;
SharedWatcher get_watcher(int64_t pool_id, const std::string& nspace,
const std::string& oid);
void maybe_remove_watcher(SharedWatcher shared_watcher);
void execute_watch(TestRadosClient *rados_client, int64_t pool_id,
const std::string& nspace, const std::string& o,
uint64_t gid, uint64_t *handle,
librados::WatchCtx *watch_ctx,
librados::WatchCtx2 *watch_ctx2,
Context *on_finish);
void execute_unwatch(TestRadosClient *rados_client, uint64_t handle,
Context *on_finish);
void execute_notify(TestRadosClient *rados_client, int64_t pool_id,
const std::string& nspace, const std::string &oid,
const bufferlist &bl, bufferlist *pbl,
Context *on_notify);
void ack_notify(TestRadosClient *rados_client, int64_t pool_id,
const std::string& nspace, const std::string &oid,
uint64_t notify_id, const WatcherID &watcher_id,
const bufferlist &bl);
void finish_notify(TestRadosClient *rados_client, int64_t pool_id,
const std::string& nspace, const std::string &oid,
uint64_t notify_id);
void handle_object_removed(int64_t pool_id, const std::string& nspace,
const std::string& oid);
};
} // namespace librados
#endif // CEPH_TEST_WATCH_NOTIFY_H
| 5,364 | 35.006711 | 80 | h |
null | ceph-main/src/test/libradosstriper/TestCase.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_RADOS_TESTCASE_H
#define CEPH_TEST_RADOS_TESTCASE_H
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "include/radosstriper/libradosstriper.h"
#include "include/radosstriper/libradosstriper.hpp"
#include "gtest/gtest.h"
#include <string>
/**
* These test cases create a temporary pool that lives as long as the
* test case. Each test within a test case gets a new ioctx and striper
* set to a unique namespace within the pool.
*
 * Since pool creation and deletion are slow, this allows many tests to

* run faster.
*/
class StriperTest : public ::testing::Test {
public:
StriperTest() {}
~StriperTest() override {}
protected:
static void SetUpTestCase();
static void TearDownTestCase();
static rados_t s_cluster;
static std::string pool_name;
void SetUp() override;
void TearDown() override;
rados_t cluster = NULL;
rados_ioctx_t ioctx = NULL;
rados_striper_t striper = NULL;
};
class StriperTestPP : public ::testing::Test {
public:
StriperTestPP() : cluster(s_cluster) {}
~StriperTestPP() override {}
static void SetUpTestCase();
static void TearDownTestCase();
protected:
static librados::Rados s_cluster;
static std::string pool_name;
void SetUp() override;
librados::Rados &cluster;
librados::IoCtx ioctx;
libradosstriper::RadosStriper striper;
};
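// Illustrative sketch only (test name and object id below are hypothetical):
// a test built on this fixture gets a ready-to-use ioctx and striper in its
// own namespace, per the note above, e.g.
//
//   TEST_F(StriperTestPP, WriteRead) {
//     bufferlist bl;
//     bl.append("payload");
//     ASSERT_EQ(0, striper.write("soid", bl, bl.length(), 0));
//     bufferlist out;
//     ASSERT_LE(0, striper.read("soid", &out, bl.length(), 0));
//   }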
struct TestData {
uint32_t stripe_unit;
uint32_t stripe_count;
uint32_t object_size;
size_t size;
};
// This is a pure copy and paste of the previous class, except for the
// inheritance from TestWithParam. With gtest >= 1.6 we could avoid this
// duplication by inheriting from WithParamInterface instead (see the
// sketch after this class).
class StriperTestParam : public ::testing::TestWithParam<TestData> {
public:
StriperTestParam() : cluster(s_cluster) {}
~StriperTestParam() override {}
static void SetUpTestCase();
static void TearDownTestCase();
protected:
static librados::Rados s_cluster;
static std::string pool_name;
void SetUp() override;
librados::Rados &cluster;
librados::IoCtx ioctx;
libradosstriper::RadosStriper striper;
};
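// A sketch of the WithParamInterface alternative mentioned above (not used
// here); it would let the parameterized fixture reuse StriperTestPP directly:
//
//   class StriperTestParam : public StriperTestPP,
//                            public ::testing::WithParamInterface<TestData> {
//   };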
#endif
| 2,186 | 25.349398 | 72 | h |
null | ceph-main/src/test/librbd/test_fixture.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/int_types.h"
#include "include/rados/librados.h"
#include "include/rbd/librbd.hpp"
#include "librbd/ImageCtx.h"
#include "gtest/gtest.h"
#include <set>
#include <string>
using namespace ceph;
class TestFixture : public ::testing::Test {
public:
TestFixture();
static void SetUpTestCase();
static void TearDownTestCase();
static std::string get_temp_image_name();
void SetUp() override;
void TearDown() override;
int open_image(const std::string &image_name, librbd::ImageCtx **ictx);
void close_image(librbd::ImageCtx *ictx);
int snap_create(librbd::ImageCtx &ictx, const std::string &snap_name);
int snap_protect(librbd::ImageCtx &ictx, const std::string &snap_name);
int flatten(librbd::ImageCtx &ictx, librbd::ProgressContext &prog_ctx);
int resize(librbd::ImageCtx *ictx, uint64_t size);
int lock_image(librbd::ImageCtx &ictx, ClsLockType lock_type,
const std::string &cookie);
int unlock_image();
int flush_writeback_cache(librbd::ImageCtx *image_ctx);
int acquire_exclusive_lock(librbd::ImageCtx &ictx);
static std::string _pool_name;
static librados::Rados _rados;
static rados_t _cluster;
static uint64_t _image_number;
static std::string _data_pool;
CephContext* m_cct = nullptr;
librados::IoCtx m_ioctx;
librbd::RBD m_rbd;
std::string m_image_name;
uint64_t m_image_size;
std::set<librbd::ImageCtx *> m_ictxs;
std::string m_lock_object;
std::string m_lock_cookie;
};
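// Illustrative only (hypothetical test name): derived tests typically open an
// image through the fixture so it is tracked and closed in TearDown(), e.g.
//
//   TEST_F(TestFixture, OpenAndResize) {
//     librbd::ImageCtx *ictx;
//     ASSERT_EQ(0, open_image(m_image_name, &ictx));
//     ASSERT_EQ(0, resize(ictx, m_image_size << 1));
//   }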
| 1,585 | 25 | 73 | h |
null | ceph-main/src/test/librbd/test_mock_fixture.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_TEST_MOCK_FIXTURE_H
#define CEPH_TEST_LIBRBD_TEST_MOCK_FIXTURE_H
#include "test/librbd/test_fixture.h"
#include "test/librbd/mock/MockImageCtx.h"
#include "test/librados_test_stub/LibradosTestStub.h"
#include "librbd/asio/ContextWQ.h"
#include <boost/shared_ptr.hpp>
#include <gmock/gmock.h>
namespace librados {
class TestCluster;
class MockTestMemCluster;
class MockTestMemIoCtxImpl;
class MockTestMemRadosClient;
}
namespace librbd {
class MockImageCtx;
}
ACTION_P(CopyInBufferlist, str) {
arg0->append(str);
}
ACTION_P2(CompleteContext, r, wq) {
librbd::asio::ContextWQ *context_wq = reinterpret_cast<
librbd::asio::ContextWQ *>(wq);
if (context_wq != NULL) {
context_wq->queue(arg0, r);
} else {
arg0->complete(r);
}
}
ACTION_P(DispatchContext, wq) {
wq->queue(arg0, arg1);
}
ACTION_P3(FinishRequest, request, r, mock) {
librbd::MockImageCtx *mock_image_ctx =
reinterpret_cast<librbd::MockImageCtx *>(mock);
mock_image_ctx->image_ctx->op_work_queue->queue(request->on_finish, r);
}
ACTION_P(GetReference, ref_object) {
ref_object->get();
}
MATCHER_P(ContentsEqual, bl, "") {
// TODO fix const-correctness of bufferlist
return const_cast<bufferlist &>(arg).contents_equal(
const_cast<bufferlist &>(bl));
}
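// Illustrative only: these actions/matchers are intended for use inside
// EXPECT_CALL expectations; for example, completing a Context argument
// through the image's work queue (object names below are hypothetical):
//
//   EXPECT_CALL(*mock_image_ctx.state, refresh(_))
//     .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue));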
class TestMockFixture : public TestFixture {
public:
typedef boost::shared_ptr<librados::TestCluster> TestClusterRef;
static void SetUpTestCase();
static void TearDownTestCase();
void TearDown() override;
void expect_op_work_queue(librbd::MockImageCtx &mock_image_ctx);
void expect_unlock_exclusive_lock(librbd::ImageCtx &ictx);
void initialize_features(librbd::ImageCtx *ictx,
librbd::MockImageCtx &mock_image_ctx,
librbd::MockExclusiveLock &mock_exclusive_lock,
librbd::MockJournal &mock_journal,
librbd::MockObjectMap &mock_object_map);
void expect_is_journal_appending(librbd::MockJournal &mock_journal,
bool appending);
void expect_is_journal_replaying(librbd::MockJournal &mock_journal);
void expect_is_journal_ready(librbd::MockJournal &mock_journal);
void expect_allocate_op_tid(librbd::MockImageCtx &mock_image_ctx);
void expect_append_op_event(librbd::MockImageCtx &mock_image_ctx,
bool can_affect_io, int r);
void expect_commit_op_event(librbd::MockImageCtx &mock_image_ctx, int r);
private:
static TestClusterRef s_test_cluster;
};
#endif // CEPH_TEST_LIBRBD_TEST_MOCK_FIXTURE_H
| 2,684 | 28.833333 | 75 | h |
null | ceph-main/src/test/librbd/test_support.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/int_types.h"
#include "include/rados/librados.h"
#include "include/rbd/librbd.hpp"
#include <string>
static const uint64_t IMAGE_STRIPE_UNIT = 65536;
static const uint64_t IMAGE_STRIPE_COUNT = 16;
#define TEST_IO_SIZE 512
#define TEST_IO_TO_SNAP_SIZE 80
bool get_features(uint64_t *features);
bool is_feature_enabled(uint64_t feature);
int create_image_pp(librbd::RBD &rbd, librados::IoCtx &ioctx,
const std::string &name, uint64_t size);
int create_image_full_pp(librbd::RBD &rbd, librados::IoCtx &ioctx,
const std::string &name, uint64_t size,
uint64_t features, bool old_format, int *order);
int clone_image_pp(librbd::RBD &rbd, librbd::Image &p_image, librados::IoCtx &p_ioctx,
const char *p_name, const char *p_snap_name, librados::IoCtx &c_ioctx,
const char *c_name, uint64_t features);
int get_image_id(librbd::Image &image, std::string *image_id);
int create_image_data_pool(librados::Rados &rados, std::string &data_pool, bool *created);
bool is_librados_test_stub(librados::Rados &rados);
bool is_rbd_pwl_enabled(ceph::common::CephContext *ctx);
#define REQUIRE(x) { \
if (!(x)) { \
GTEST_SKIP() << "Skipping due to unmet REQUIRE"; \
} \
}
#define REQUIRE_FEATURE(feature) REQUIRE(is_feature_enabled(feature))
#define REQUIRE_FORMAT_V1() REQUIRE(!is_feature_enabled(0))
#define REQUIRE_FORMAT_V2() REQUIRE_FEATURE(0)
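// Illustrative only: tests typically guard feature- or environment-specific
// cases with these macros (the rados handle below is hypothetical), e.g.
//
//   REQUIRE_FEATURE(RBD_FEATURE_LAYERING);   // skip unless layering is enabled
//   REQUIRE(!is_librados_test_stub(rados));  // skip when run against the stub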
| 1,582 | 38.575 | 90 | h |
null | ceph-main/src/test/librbd/mock/MockExclusiveLock.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_EXCLUSIVE_LOCK_H
#define CEPH_TEST_LIBRBD_MOCK_EXCLUSIVE_LOCK_H
#include "common/RefCountedObj.h"
#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/io/Types.h"
#include "gmock/gmock.h"
class Context;
namespace librbd {
struct MockExclusiveLock {
MOCK_CONST_METHOD0(is_lock_owner, bool());
MOCK_METHOD2(init, void(uint64_t features, Context*));
MOCK_METHOD1(shut_down, void(Context*));
MOCK_METHOD1(reacquire_lock, void(Context*));
MOCK_METHOD1(try_acquire_lock, void(Context*));
MOCK_METHOD1(block_requests, void(int));
MOCK_METHOD0(unblock_requests, void());
MOCK_METHOD1(acquire_lock, void(Context *));
MOCK_METHOD1(release_lock, void(Context *));
MOCK_METHOD2(accept_request, bool(exclusive_lock::OperationRequestType,
int *));
MOCK_METHOD0(accept_ops, bool());
MOCK_METHOD0(get_unlocked_op_error, int());
MOCK_METHOD3(set_require_lock, void(bool init_shutdown, io::Direction,
Context*));
MOCK_METHOD1(unset_require_lock, void(io::Direction));
MOCK_METHOD1(start_op, Context*(int*));
void get() {}
void put() {}
};
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_EXCLUSIVE_LOCK_H
| 1,418 | 26.823529 | 73 | h |
null | ceph-main/src/test/librbd/mock/MockImageCtx.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_IMAGE_CTX_H
#define CEPH_TEST_LIBRBD_MOCK_IMAGE_CTX_H
#include "include/rados/librados.hpp"
#include "test/librbd/mock/MockContextWQ.h"
#include "test/librbd/mock/MockExclusiveLock.h"
#include "test/librbd/mock/MockImageState.h"
#include "test/librbd/mock/MockImageWatcher.h"
#include "test/librbd/mock/MockJournal.h"
#include "test/librbd/mock/MockObjectMap.h"
#include "test/librbd/mock/MockOperations.h"
#include "test/librbd/mock/MockPluginRegistry.h"
#include "test/librbd/mock/MockReadahead.h"
#include "test/librbd/mock/io/MockImageDispatcher.h"
#include "test/librbd/mock/io/MockObjectDispatcher.h"
#include "common/WorkQueue.h"
#include "common/zipkin_trace.h"
#include "librbd/ImageCtx.h"
#include "gmock/gmock.h"
#include <string>
class MockSafeTimer;
namespace librbd {
namespace operation {
template <typename> class ResizeRequest;
}
namespace crypto {
class MockEncryptionFormat;
}
struct MockImageCtx {
static MockImageCtx *s_instance;
static MockImageCtx *create(const std::string &image_name,
const std::string &image_id,
const char *snap, librados::IoCtx& p,
bool read_only) {
ceph_assert(s_instance != nullptr);
return s_instance;
}
MockImageCtx(librbd::ImageCtx &image_ctx);
virtual ~MockImageCtx();
void wait_for_async_ops();
void wait_for_async_requests() {
async_ops_lock.lock();
if (async_requests.empty()) {
async_ops_lock.unlock();
return;
}
C_SaferCond ctx;
async_requests_waiters.push_back(&ctx);
async_ops_lock.unlock();
ctx.wait();
}
MOCK_METHOD1(init_layout, void(int64_t));
MOCK_CONST_METHOD1(get_object_name, std::string(uint64_t));
MOCK_CONST_METHOD0(get_object_size, uint64_t());
MOCK_CONST_METHOD0(get_current_size, uint64_t());
MOCK_CONST_METHOD1(get_image_size, uint64_t(librados::snap_t));
MOCK_CONST_METHOD1(get_area_size, uint64_t(io::ImageArea));
MOCK_CONST_METHOD1(get_object_count, uint64_t(librados::snap_t));
MOCK_CONST_METHOD1(get_read_flags, int(librados::snap_t));
MOCK_CONST_METHOD2(get_flags, int(librados::snap_t in_snap_id,
uint64_t *flags));
MOCK_CONST_METHOD2(get_snap_id,
librados::snap_t(cls::rbd::SnapshotNamespace snap_namespace,
std::string in_snap_name));
MOCK_CONST_METHOD1(get_snap_info, const SnapInfo*(librados::snap_t));
MOCK_CONST_METHOD2(get_snap_name, int(librados::snap_t, std::string *));
MOCK_CONST_METHOD2(get_snap_namespace, int(librados::snap_t,
cls::rbd::SnapshotNamespace *out_snap_namespace));
MOCK_CONST_METHOD2(get_parent_spec, int(librados::snap_t in_snap_id,
cls::rbd::ParentImageSpec *pspec));
MOCK_CONST_METHOD1(get_parent_info, const ParentImageInfo*(librados::snap_t));
MOCK_CONST_METHOD2(get_parent_overlap, int(librados::snap_t in_snap_id,
uint64_t *raw_overlap));
MOCK_CONST_METHOD2(reduce_parent_overlap,
std::pair<uint64_t, io::ImageArea>(uint64_t, bool));
MOCK_CONST_METHOD4(prune_parent_extents,
uint64_t(std::vector<std::pair<uint64_t, uint64_t>>&,
io::ImageArea, uint64_t, bool));
MOCK_CONST_METHOD2(is_snap_protected, int(librados::snap_t in_snap_id,
bool *is_protected));
MOCK_CONST_METHOD2(is_snap_unprotected, int(librados::snap_t in_snap_id,
bool *is_unprotected));
MOCK_CONST_METHOD0(get_create_timestamp, utime_t());
MOCK_CONST_METHOD0(get_access_timestamp, utime_t());
MOCK_CONST_METHOD0(get_modify_timestamp, utime_t());
MOCK_METHOD1(set_access_timestamp, void(const utime_t at));
MOCK_METHOD1(set_modify_timestamp, void(const utime_t at));
MOCK_METHOD8(add_snap, void(cls::rbd::SnapshotNamespace in_snap_namespace,
std::string in_snap_name,
librados::snap_t id,
uint64_t in_size, const ParentImageInfo &parent,
uint8_t protection_status, uint64_t flags, utime_t timestamp));
MOCK_METHOD3(rm_snap, void(cls::rbd::SnapshotNamespace in_snap_namespace,
std::string in_snap_name,
librados::snap_t id));
MOCK_METHOD0(user_flushed, void());
MOCK_METHOD1(flush_copyup, void(Context *));
MOCK_CONST_METHOD1(test_features, bool(uint64_t test_features));
MOCK_CONST_METHOD2(test_features, bool(uint64_t test_features,
const ceph::shared_mutex &in_image_lock));
MOCK_CONST_METHOD1(test_op_features, bool(uint64_t op_features));
MOCK_METHOD1(cancel_async_requests, void(Context*));
MOCK_METHOD0(create_exclusive_lock, MockExclusiveLock*());
MOCK_METHOD1(create_object_map, MockObjectMap*(uint64_t));
MOCK_METHOD0(create_journal, MockJournal*());
MOCK_METHOD0(notify_update, void());
MOCK_METHOD1(notify_update, void(Context *));
MOCK_CONST_METHOD0(get_exclusive_lock_policy, exclusive_lock::Policy*());
MOCK_METHOD1(set_exclusive_lock_policy, void(exclusive_lock::Policy*));
MOCK_CONST_METHOD0(get_journal_policy, journal::Policy*());
MOCK_METHOD1(set_journal_policy, void(journal::Policy*));
MOCK_METHOD2(apply_metadata, int(const std::map<std::string, bufferlist> &,
bool));
MOCK_CONST_METHOD0(get_stripe_count, uint64_t());
MOCK_CONST_METHOD0(get_stripe_period, uint64_t());
MOCK_METHOD0(rebuild_data_io_context, void());
IOContext get_data_io_context();
IOContext duplicate_data_io_context();
static void set_timer_instance(MockSafeTimer *timer, ceph::mutex *timer_lock);
static void get_timer_instance(CephContext *cct, MockSafeTimer **timer,
ceph::mutex **timer_lock);
ImageCtx *image_ctx;
CephContext *cct;
PerfCounters *perfcounter;
cls::rbd::SnapshotNamespace snap_namespace;
std::string snap_name;
uint64_t snap_id;
bool snap_exists;
::SnapContext snapc;
std::vector<librados::snap_t> snaps;
std::map<librados::snap_t, SnapInfo> snap_info;
std::map<ImageCtx::SnapKey, librados::snap_t, ImageCtx::SnapKeyComparator> snap_ids;
bool old_format;
bool read_only;
uint32_t read_only_flags;
uint32_t read_only_mask;
bool clone_copy_on_read;
std::map<rados::cls::lock::locker_id_t,
rados::cls::lock::locker_info_t> lockers;
bool exclusive_locked;
std::string lock_tag;
std::shared_ptr<AsioEngine> asio_engine;
neorados::RADOS& rados_api;
librados::IoCtx md_ctx;
librados::IoCtx data_ctx;
ceph::shared_mutex &owner_lock;
ceph::shared_mutex &image_lock;
ceph::shared_mutex ×tamp_lock;
ceph::mutex &async_ops_lock;
ceph::mutex ©up_list_lock;
uint8_t order;
uint64_t size;
uint64_t features;
uint64_t flags;
uint64_t op_features;
bool operations_disabled;
uint64_t stripe_unit;
uint64_t stripe_count;
std::string object_prefix;
std::string header_oid;
std::string id;
std::string name;
ParentImageInfo parent_md;
MigrationInfo migration_info;
char *format_string;
cls::rbd::GroupSpec group_spec;
file_layout_t layout;
xlist<operation::ResizeRequest<MockImageCtx>*> resize_reqs;
xlist<AsyncRequest<MockImageCtx>*> async_requests;
std::list<Context*> async_requests_waiters;
std::map<uint64_t, io::CopyupRequest<MockImageCtx>*> copyup_list;
io::MockImageDispatcher *io_image_dispatcher;
io::MockObjectDispatcher *io_object_dispatcher;
MockContextWQ *op_work_queue;
MockPluginRegistry* plugin_registry;
MockReadahead readahead;
uint64_t readahead_max_bytes;
EventSocket &event_socket;
MockImageCtx *child = nullptr;
MockImageCtx *parent;
MockOperations *operations;
MockImageState *state;
MockImageWatcher *image_watcher;
MockObjectMap *object_map;
MockExclusiveLock *exclusive_lock;
MockJournal *journal;
ZTracer::Endpoint trace_endpoint;
std::unique_ptr<crypto::MockEncryptionFormat> encryption_format;
uint64_t sparse_read_threshold_bytes;
uint32_t discard_granularity_bytes;
int mirroring_replay_delay;
bool non_blocking_aio;
bool blkin_trace_all;
bool enable_alloc_hint;
uint32_t alloc_hint_flags;
uint32_t read_flags;
bool ignore_migrating;
bool enable_sparse_copyup;
uint64_t mtime_update_interval;
uint64_t atime_update_interval;
bool cache;
ConfigProxy config;
std::set<std::string> config_overrides;
};
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_IMAGE_CTX_H
| 8,644 | 31.996183 | 86 | h |
null | ceph-main/src/test/librbd/mock/MockImageState.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_IMAGE_STATE_H
#define CEPH_TEST_LIBRBD_MOCK_IMAGE_STATE_H
#include <gmock/gmock.h>
#include "cls/rbd/cls_rbd_types.h"
class Context;
namespace librbd {
class UpdateWatchCtx;
struct MockImageState {
MOCK_CONST_METHOD0(is_refresh_required, bool());
MOCK_METHOD1(refresh, void(Context*));
MOCK_METHOD2(open, void(bool, Context*));
MOCK_METHOD0(close, int());
MOCK_METHOD1(close, void(Context*));
MOCK_METHOD2(snap_set, void(uint64_t snap_id, Context*));
MOCK_METHOD1(prepare_lock, void(Context*));
MOCK_METHOD0(handle_prepare_lock_complete, void());
MOCK_METHOD2(register_update_watcher, int(UpdateWatchCtx *, uint64_t *));
MOCK_METHOD2(unregister_update_watcher, void(uint64_t, Context *));
MOCK_METHOD0(handle_update_notification, void());
};
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_IMAGE_STATE_H
| 975 | 23.4 | 75 | h |
null | ceph-main/src/test/librbd/mock/MockImageWatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_IMAGE_WATCHER_H
#define CEPH_TEST_LIBRBD_MOCK_IMAGE_WATCHER_H
#include "gmock/gmock.h"
class Context;
namespace librbd {
class ProgressContext;
struct MockImageWatcher {
MOCK_METHOD0(is_registered, bool());
MOCK_METHOD0(is_unregistered, bool());
MOCK_METHOD0(is_blocklisted, bool());
MOCK_METHOD0(unregister_watch, void());
MOCK_METHOD1(flush, void(Context *));
MOCK_CONST_METHOD0(get_watch_handle, uint64_t());
MOCK_METHOD0(notify_acquired_lock, void());
MOCK_METHOD0(notify_released_lock, void());
MOCK_METHOD0(notify_request_lock, void());
MOCK_METHOD3(notify_quiesce, void(uint64_t *, ProgressContext &, Context *));
MOCK_METHOD2(notify_unquiesce, void(uint64_t, Context *));
};
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_IMAGE_WATCHER_H
| 914 | 25.142857 | 79 | h |
null | ceph-main/src/test/librbd/mock/MockJournal.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_JOURNAL_H
#define CEPH_TEST_LIBRBD_MOCK_JOURNAL_H
#include "common/RefCountedObj.h"
#include "gmock/gmock.h"
#include "include/rados/librados_fwd.hpp"
#include "librbd/Journal.h"
#include "librbd/journal/Types.h"
#include <list>
struct Context;
struct ContextWQ;
namespace librbd {
struct ImageCtx;
struct MockJournal {
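  // Tests construct a MockJournal up front; its constructor (below) registers
  // the instance as s_instance so the static helpers can forward to the mock.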
static MockJournal *s_instance;
static MockJournal *get_instance() {
ceph_assert(s_instance != nullptr);
return s_instance;
}
template <typename ImageCtxT>
static int is_tag_owner(ImageCtxT *image_ctx, bool *is_tag_owner) {
return get_instance()->is_tag_owner(is_tag_owner);
}
static void get_tag_owner(librados::IoCtx &,
const std::string &global_image_id,
std::string *tag_owner, ContextWQ *work_queue,
Context *on_finish) {
get_instance()->get_tag_owner(global_image_id, tag_owner,
work_queue, on_finish);
}
MockJournal() {
s_instance = this;
}
void get() {}
void put() {}
MOCK_CONST_METHOD0(is_journal_ready, bool());
MOCK_CONST_METHOD0(is_journal_replaying, bool());
MOCK_CONST_METHOD0(is_journal_appending, bool());
MOCK_METHOD1(wait_for_journal_ready, void(Context *));
MOCK_METHOD4(get_tag_owner, void(const std::string &,
std::string *, ContextWQ *,
Context *));
MOCK_CONST_METHOD0(is_tag_owner, bool());
MOCK_CONST_METHOD1(is_tag_owner, int(bool *));
MOCK_METHOD3(allocate_tag, void(const std::string &mirror_uuid,
const journal::TagPredecessor &predecessor,
Context *on_finish));
MOCK_METHOD1(open, void(Context *));
MOCK_METHOD1(close, void(Context *));
MOCK_CONST_METHOD0(get_tag_tid, uint64_t());
MOCK_CONST_METHOD0(get_tag_data, journal::TagData());
MOCK_METHOD0(allocate_op_tid, uint64_t());
MOCK_METHOD0(user_flushed, void());
MOCK_METHOD3(append_op_event_mock, void(uint64_t, const journal::EventEntry&,
Context *));
void append_op_event(uint64_t op_tid, journal::EventEntry &&event_entry,
Context *on_safe) {
// googlemock doesn't support move semantics
append_op_event_mock(op_tid, event_entry, on_safe);
}
MOCK_METHOD2(flush_event, void(uint64_t, Context *));
MOCK_METHOD2(wait_event, void(uint64_t, Context *));
MOCK_METHOD3(commit_op_event, void(uint64_t, int, Context *));
MOCK_METHOD2(replay_op_ready, void(uint64_t, Context *));
MOCK_METHOD1(add_listener, void(journal::Listener *));
MOCK_METHOD1(remove_listener, void(journal::Listener *));
MOCK_METHOD1(is_resync_requested, int(bool *));
};
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_JOURNAL_H
| 2,977 | 29.701031 | 79 | h |
null | ceph-main/src/test/librbd/mock/MockJournalPolicy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_JOURNAL_POLICY_H
#define CEPH_TEST_LIBRBD_MOCK_JOURNAL_POLICY_H
#include "librbd/journal/Policy.h"
#include "gmock/gmock.h"
namespace librbd {
struct MockJournalPolicy : public journal::Policy {
MOCK_CONST_METHOD0(append_disabled, bool());
MOCK_CONST_METHOD0(journal_disabled, bool());
MOCK_METHOD1(allocate_tag_on_lock, void(Context*));
};
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_JOURNAL_POLICY_H
| 554 | 23.130435 | 70 | h |
null | ceph-main/src/test/librbd/mock/MockObjectMap.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_OBJECT_MAP_H
#define CEPH_TEST_LIBRBD_MOCK_OBJECT_MAP_H
#include "librbd/Utils.h"
#include "gmock/gmock.h"
namespace librbd {
struct MockObjectMap {
MOCK_METHOD1(at, uint8_t(uint64_t));
uint8_t operator[](uint64_t object_no) {
return at(object_no);
}
MOCK_CONST_METHOD0(size, uint64_t());
MOCK_METHOD1(open, void(Context *on_finish));
MOCK_METHOD1(close, void(Context *on_finish));
MOCK_METHOD3(aio_resize, void(uint64_t new_size, uint8_t default_object_state,
Context *on_finish));
void get() {}
void put() {}
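  // The templated overloads below adapt a callback object into a Context* via
  // util::create_context_callback and forward to the mocked eight-argument
  // aio_update(); the Context is deleted here when no update gets queued.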
template <typename T, void(T::*MF)(int) = &T::complete>
bool aio_update(uint64_t snap_id, uint64_t start_object_no, uint8_t new_state,
const boost::optional<uint8_t> ¤t_state,
const ZTracer::Trace &parent_trace, bool ignore_enoent,
T *callback_object) {
return aio_update<T, MF>(snap_id, start_object_no, start_object_no + 1,
new_state, current_state, parent_trace,
ignore_enoent, callback_object);
}
template <typename T, void(T::*MF)(int) = &T::complete>
bool aio_update(uint64_t snap_id, uint64_t start_object_no,
uint64_t end_object_no, uint8_t new_state,
const boost::optional<uint8_t> ¤t_state,
const ZTracer::Trace &parent_trace, bool ignore_enoent,
T *callback_object) {
auto ctx = util::create_context_callback<T, MF>(callback_object);
bool updated = aio_update(snap_id, start_object_no, end_object_no,
new_state, current_state, parent_trace,
ignore_enoent, ctx);
if (!updated) {
delete ctx;
}
return updated;
}
MOCK_METHOD8(aio_update, bool(uint64_t snap_id, uint64_t start_object_no,
uint64_t end_object_no, uint8_t new_state,
const boost::optional<uint8_t> ¤t_state,
const ZTracer::Trace &parent_trace,
bool ignore_enoent, Context *on_finish));
MOCK_METHOD2(snapshot_add, void(uint64_t snap_id, Context *on_finish));
MOCK_METHOD2(snapshot_remove, void(uint64_t snap_id, Context *on_finish));
MOCK_METHOD2(rollback, void(uint64_t snap_id, Context *on_finish));
MOCK_CONST_METHOD1(object_may_exist, bool(uint64_t));
};
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_OBJECT_MAP_H
| 2,646 | 36.28169 | 80 | h |
null | ceph-main/src/test/librbd/mock/MockOperations.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_OPERATIONS_H
#define CEPH_TEST_LIBRBD_MOCK_OPERATIONS_H
#include "cls/rbd/cls_rbd_types.h"
#include "include/int_types.h"
#include "include/rbd/librbd.hpp"
#include "gmock/gmock.h"
#include <string>
class Context;
namespace librbd {
struct MockOperations {
MOCK_METHOD2(execute_flatten, void(ProgressContext &prog_ctx,
Context *on_finish));
MOCK_METHOD2(execute_rebuild_object_map, void(ProgressContext &prog_ctx,
Context *on_finish));
MOCK_METHOD2(execute_rename, void(const std::string &dstname,
Context *on_finish));
MOCK_METHOD5(execute_resize, void(uint64_t size, bool allow_shrink,
ProgressContext &prog_ctx,
Context *on_finish,
uint64_t journal_op_tid));
MOCK_METHOD5(snap_create, void(const cls::rbd::SnapshotNamespace &snapshot_namespace,
const std::string &snap_name,
uint64_t flags,
ProgressContext &prog_ctx,
Context *on_finish));
MOCK_METHOD6(execute_snap_create, void(const cls::rbd::SnapshotNamespace &snapshot_namespace,
const std::string &snap_name,
Context *on_finish,
uint64_t journal_op_tid,
uint64_t flags,
ProgressContext &prog_ctx));
MOCK_METHOD3(snap_remove, void(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish));
MOCK_METHOD3(execute_snap_remove, void(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish));
MOCK_METHOD3(execute_snap_rename, void(uint64_t src_snap_id,
const std::string &snap_name,
Context *on_finish));
MOCK_METHOD4(execute_snap_rollback, void(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
ProgressContext &prog_ctx,
Context *on_finish));
MOCK_METHOD3(execute_snap_protect, void(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish));
MOCK_METHOD3(execute_snap_unprotect, void(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish));
MOCK_METHOD2(execute_snap_set_limit, void(uint64_t limit,
Context *on_finish));
MOCK_METHOD4(execute_update_features, void(uint64_t features, bool enabled,
Context *on_finish,
uint64_t journal_op_tid));
MOCK_METHOD3(execute_metadata_set, void(const std::string &key,
const std::string &value,
Context *on_finish));
MOCK_METHOD2(execute_metadata_remove, void(const std::string &key,
Context *on_finish));
};
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_OPERATIONS_H
| 3,640 | 48.876712 | 95 | h |
null | ceph-main/src/test/librbd/mock/MockPluginRegistry.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_PLUGIN_REGISTRY_H
#define CEPH_TEST_LIBRBD_MOCK_PLUGIN_REGISTRY_H
#include <gmock/gmock.h>
class Context;
namespace librbd {
struct MockPluginRegistry{
MOCK_METHOD2(init, void(const std::string&, Context*));
MOCK_METHOD1(acquired_exclusive_lock, void(Context*));
MOCK_METHOD1(prerelease_exclusive_lock, void(Context*));
};
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_PLUGIN_REGISTRY_H
| 536 | 23.409091 | 70 | h |
null | ceph-main/src/test/librbd/mock/cache/MockImageCache.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_CACHE_MOCK_IMAGE_CACHE_H
#define CEPH_TEST_LIBRBD_CACHE_MOCK_IMAGE_CACHE_H
#include "gmock/gmock.h"
#include "librbd/io/Types.h"
#include <vector>
namespace librbd {
namespace cache {
struct MockImageCache {
typedef std::vector<std::pair<uint64_t,uint64_t> > Extents;
MOCK_METHOD4(aio_read_mock, void(const Extents &, ceph::bufferlist*, int,
Context *));
void aio_read(Extents&& image_extents, ceph::bufferlist* bl,
int fadvise_flags, Context *on_finish) {
aio_read_mock(image_extents, bl, fadvise_flags, on_finish);
}
MOCK_METHOD4(aio_write_mock, void(const Extents &, const ceph::bufferlist &,
int, Context *));
void aio_write(Extents&& image_extents, ceph::bufferlist&& bl,
int fadvise_flags, Context *on_finish) {
aio_write_mock(image_extents, bl, fadvise_flags, on_finish);
}
MOCK_METHOD4(aio_discard, void(uint64_t, uint64_t, uint32_t, Context *));
MOCK_METHOD1(aio_flush, void(Context *));
MOCK_METHOD2(aio_flush, void(librbd::io::FlushSource, Context *));
MOCK_METHOD5(aio_writesame_mock, void(uint64_t, uint64_t, ceph::bufferlist& bl,
int, Context *));
void aio_writesame(uint64_t off, uint64_t len, ceph::bufferlist&& bl,
int fadvise_flags, Context *on_finish) {
aio_writesame_mock(off, len, bl, fadvise_flags, on_finish);
}
MOCK_METHOD6(aio_compare_and_write_mock, void(const Extents &,
const ceph::bufferlist &,
const ceph::bufferlist &,
uint64_t *, int, Context *));
void aio_compare_and_write(Extents&& image_extents, ceph::bufferlist&& cmp_bl,
ceph::bufferlist&& bl, uint64_t *mismatch_offset,
int fadvise_flags, Context *on_finish) {
aio_compare_and_write_mock(image_extents, cmp_bl, bl, mismatch_offset,
fadvise_flags, on_finish);
}
};
} // namespace cache
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_CACHE_MOCK_IMAGE_CACHE_H
| 2,329 | 38.491525 | 81 | h |
null | ceph-main/src/test/librbd/mock/crypto/MockCryptoInterface.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_CRYPTO_MOCK_CRYPTO_INTERFACE_H
#define CEPH_TEST_LIBRBD_MOCK_CRYPTO_MOCK_CRYPTO_INTERFACE_H
#include "include/buffer.h"
#include "gmock/gmock.h"
#include "librbd/crypto/CryptoInterface.h"
namespace librbd {
namespace crypto {
struct MockCryptoInterface : CryptoInterface {
static const uint64_t BLOCK_SIZE = 4096;
static const uint64_t DATA_OFFSET = 4 * 1024 * 1024;
MOCK_METHOD2(encrypt, int(ceph::bufferlist*, uint64_t));
MOCK_METHOD2(decrypt, int(ceph::bufferlist*, uint64_t));
MOCK_CONST_METHOD0(get_key, const unsigned char*());
MOCK_CONST_METHOD0(get_key_length, int());
uint64_t get_block_size() const override {
return BLOCK_SIZE;
}
uint64_t get_data_offset() const override {
return DATA_OFFSET;
}
};
} // namespace crypto
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_CRYPTO_MOCK_CRYPTO_INTERFACE_H
| 983 | 25.594595 | 70 | h |
null | ceph-main/src/test/librbd/mock/crypto/MockDataCryptor.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_CRYPTO_MOCK_DATA_CRYPTOR_H
#define CEPH_TEST_LIBRBD_MOCK_CRYPTO_MOCK_DATA_CRYPTOR_H
#include "gmock/gmock.h"
#include "librbd/crypto/DataCryptor.h"
namespace librbd {
namespace crypto {
struct MockCryptoContext {};
class MockDataCryptor : public DataCryptor<MockCryptoContext> {
public:
uint32_t block_size = 16;
uint32_t iv_size = 16;
uint32_t get_block_size() const override {
return block_size;
}
uint32_t get_iv_size() const override {
return iv_size;
}
MOCK_METHOD1(get_context, MockCryptoContext*(CipherMode));
MOCK_METHOD2(return_context, void(MockCryptoContext*, CipherMode));
MOCK_CONST_METHOD3(init_context, int(MockCryptoContext*,
const unsigned char*, uint32_t));
MOCK_CONST_METHOD4(update_context, int(MockCryptoContext*,
const unsigned char*, unsigned char*,
uint32_t));
MOCK_CONST_METHOD0(get_key, const unsigned char*());
MOCK_CONST_METHOD0(get_key_length, int());
};
} // namespace crypto
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_CRYPTO_MOCK_DATA_CRYPTOR_H
| 1,279 | 28.090909 | 78 | h |
null | ceph-main/src/test/librbd/mock/crypto/MockEncryptionFormat.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_CRYPTO_MOCK_ENCRYPTION_FORMAT_H
#define CEPH_TEST_LIBRBD_MOCK_CRYPTO_MOCK_ENCRYPTION_FORMAT_H
#include "gmock/gmock.h"
#include "librbd/crypto/EncryptionFormat.h"
#include "test/librbd/mock/MockImageCtx.h"
#include "test/librbd/mock/crypto/MockCryptoInterface.h"
namespace librbd {
namespace crypto {
struct MockEncryptionFormat {
MOCK_CONST_METHOD0(clone, std::unique_ptr<MockEncryptionFormat>());
MOCK_METHOD2(format, void(MockImageCtx*, Context*));
MOCK_METHOD3(load, void(MockImageCtx*, std::string*, Context*));
MOCK_METHOD2(flatten, void(MockImageCtx*, Context*));
MOCK_METHOD0(get_crypto, MockCryptoInterface*());
};
} // namespace crypto
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_CRYPTO_MOCK_ENCRYPTION_FORMAT_H
| 876 | 31.481481 | 70 | h |
null | ceph-main/src/test/librbd/mock/exclusive_lock/MockPolicy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_EXCLUSIVE_LOCK_POLICY_H
#define CEPH_TEST_LIBRBD_MOCK_EXCLUSIVE_LOCK_POLICY_H
#include "librbd/exclusive_lock/Policy.h"
#include <gmock/gmock.h>
namespace librbd {
namespace exclusive_lock {
struct MockPolicy : public Policy {
MOCK_METHOD0(may_auto_request_lock, bool());
MOCK_METHOD1(lock_requested, int(bool));
MOCK_METHOD1(accept_blocked_request, bool(OperationRequestType));
};
} // namespace exclusive_lock
} // librbd
#endif
| 572 | 22.875 | 70 | h |
null | ceph-main/src/test/librbd/mock/io/MockImageDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_IO_IMAGE_DISPATCH_H
#define CEPH_TEST_LIBRBD_MOCK_IO_IMAGE_DISPATCH_H
#include "gmock/gmock.h"
#include "include/Context.h"
#include "librbd/io/ImageDispatchInterface.h"
#include "librbd/io/Types.h"
class Context;
namespace librbd {
namespace io {
struct MockImageDispatch : public ImageDispatchInterface {
public:
MOCK_CONST_METHOD0(get_dispatch_layer, ImageDispatchLayer());
MOCK_METHOD1(shut_down, void(Context*));
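  // The overrides below return false, i.e. they never intercept the I/O, so
  // requests fall through to the next dispatch layer under test.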
bool read(
AioCompletion* aio_comp, Extents &&image_extents,
ReadResult &&read_result, IOContext io_context, int op_flags,
int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
std::atomic<uint32_t>* image_dispatch_flags,
DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override {
return false;
}
bool write(
AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
int op_flags, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override {
return false;
}
bool discard(
AioCompletion* aio_comp, Extents &&image_extents,
uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override {
return false;
}
bool write_same(
AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl,
int op_flags, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override {
return false;
}
bool compare_and_write(
AioCompletion* aio_comp, Extents &&image_extents,
bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
int op_flags, const ZTracer::Trace &parent_trace,
uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override {
return false;
}
bool flush(
AioCompletion* aio_comp, FlushSource flush_source,
const ZTracer::Trace &parent_trace, uint64_t tid,
std::atomic<uint32_t>* image_dispatch_flags,
DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override {
return false;
}
bool list_snaps(
AioCompletion* aio_comp, Extents&& image_extents, SnapIds&& snap_ids,
int list_snaps_flags, SnapshotDelta* snapshot_delta,
const ZTracer::Trace &parent_trace, uint64_t tid,
std::atomic<uint32_t>* image_dispatch_flags,
DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override {
return false;
}
bool invalidate_cache(Context* on_finish) override {
return false;
}
};
} // namespace io
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_IO_IMAGE_DISPATCH_H
| 3,209 | 31.424242 | 77 | h |
null | ceph-main/src/test/librbd/mock/io/MockImageDispatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_IO_IMAGE_DISPATCHER_H
#define CEPH_TEST_LIBRBD_MOCK_IO_IMAGE_DISPATCHER_H
#include "gmock/gmock.h"
#include "include/Context.h"
#include "librbd/io/ImageDispatcher.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/Types.h"
class Context;
namespace librbd {
namespace io {
struct ImageDispatchInterface;
struct MockImageDispatcher : public ImageDispatcherInterface {
public:
MOCK_METHOD1(shut_down, void(Context*));
MOCK_METHOD1(register_dispatch, void(ImageDispatchInterface*));
MOCK_METHOD1(exists, bool(ImageDispatchLayer));
MOCK_METHOD2(shut_down_dispatch, void(ImageDispatchLayer, Context*));
MOCK_METHOD1(invalidate_cache, void(Context *));
MOCK_METHOD1(send, void(ImageDispatchSpec*));
MOCK_METHOD3(finish, void(int r, ImageDispatchLayer, uint64_t));
MOCK_METHOD1(apply_qos_schedule_tick_min, void(uint64_t));
MOCK_METHOD4(apply_qos_limit, void(uint64_t, uint64_t, uint64_t, uint64_t));
MOCK_METHOD1(apply_qos_exclude_ops, void(uint64_t));
MOCK_CONST_METHOD0(writes_blocked, bool());
MOCK_METHOD0(block_writes, int());
MOCK_METHOD1(block_writes, void(Context*));
MOCK_METHOD0(unblock_writes, void());
MOCK_METHOD1(wait_on_writes_unblocked, void(Context*));
MOCK_METHOD2(remap_to_physical, void(Extents&, ImageArea));
MOCK_METHOD1(remap_to_logical, ImageArea(Extents&));
};
} // namespace io
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_IO_IMAGE_DISPATCHER_H
| 1,563 | 29.666667 | 78 | h |
null | ceph-main/src/test/librbd/mock/io/MockObjectDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_IO_OBJECT_DISPATCH_H
#define CEPH_TEST_LIBRBD_MOCK_IO_OBJECT_DISPATCH_H
#include "gmock/gmock.h"
#include "common/ceph_mutex.h"
#include "librbd/io/ObjectDispatchInterface.h"
#include "librbd/io/Types.h"
class Context;
namespace librbd {
namespace io {
struct MockObjectDispatch : public ObjectDispatchInterface {
public:
ceph::shared_mutex lock = ceph::make_shared_mutex("MockObjectDispatch::lock");
MockObjectDispatch() {}
MOCK_CONST_METHOD0(get_dispatch_layer, ObjectDispatchLayer());
MOCK_METHOD1(shut_down, void(Context*));
MOCK_METHOD6(execute_read,
bool(uint64_t, ReadExtents*, IOContext io_context, uint64_t*,
DispatchResult*, Context*));
bool read(
uint64_t object_no, ReadExtents* extents, IOContext io_context,
int op_flags, int read_flags, const ZTracer::Trace& parent_trace,
uint64_t* version, int* dispatch_flags,
DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
return execute_read(object_no, extents, io_context, version,
dispatch_result, on_dispatched);
}
MOCK_METHOD9(execute_discard,
bool(uint64_t, uint64_t, uint64_t, IOContext, int,
int*, uint64_t*, DispatchResult*, Context*));
bool discard(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
IOContext io_context, int discard_flags,
const ZTracer::Trace &parent_trace, int* dispatch_flags,
uint64_t* journal_tid, DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
return execute_discard(object_no, object_off, object_len, io_context,
discard_flags, dispatch_flags, journal_tid,
dispatch_result, on_dispatched);
}
MOCK_METHOD10(execute_write,
bool(uint64_t, uint64_t, const ceph::bufferlist&,
IOContext, int, std::optional<uint64_t>, int*,
uint64_t*, DispatchResult*, Context *));
bool write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
IOContext io_context, int op_flags, int write_flags,
std::optional<uint64_t> assert_version,
const ZTracer::Trace &parent_trace, int* dispatch_flags,
uint64_t* journal_tid, DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override {
return execute_write(object_no, object_off, data, io_context, write_flags,
assert_version, dispatch_flags, journal_tid,
dispatch_result, on_dispatched);
}
MOCK_METHOD10(execute_write_same,
bool(uint64_t, uint64_t, uint64_t,
const LightweightBufferExtents&,
const ceph::bufferlist&, IOContext, int*,
uint64_t*, DispatchResult*, Context *));
bool write_same(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, int* dispatch_flags,
uint64_t* journal_tid, DispatchResult* dispatch_result,
Context* *on_finish, Context* on_dispatched) override {
return execute_write_same(object_no, object_off, object_len, buffer_extents,
data, io_context, dispatch_flags, journal_tid,
dispatch_result, on_dispatched);
}
MOCK_METHOD9(execute_compare_and_write,
bool(uint64_t, uint64_t, const ceph::bufferlist&,
const ceph::bufferlist&, uint64_t*, int*, uint64_t*,
DispatchResult*, Context *));
bool compare_and_write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
int* dispatch_flags, uint64_t* journal_tid,
DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override {
return execute_compare_and_write(object_no, object_off, cmp_data,
write_data, mismatch_offset,
dispatch_flags, journal_tid,
dispatch_result, on_dispatched);
}
MOCK_METHOD4(execute_flush, bool(FlushSource, uint64_t*, DispatchResult*,
Context*));
bool flush(FlushSource flush_source, const ZTracer::Trace &parent_trace,
uint64_t* journal_tid, DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
return execute_flush(flush_source, journal_tid, dispatch_result,
on_dispatched);
}
MOCK_METHOD7(execute_list_snaps, bool(uint64_t, const Extents&,
const SnapIds&, int, SnapshotDelta*,
DispatchResult*, Context*));
bool list_snaps(
uint64_t object_no, io::Extents&& extents, SnapIds&& snap_ids,
int list_snaps_flags, const ZTracer::Trace &parent_trace,
SnapshotDelta* snapshot_delta, int* object_dispatch_flags,
DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override {
return execute_list_snaps(object_no, extents, snap_ids, list_snaps_flags,
snapshot_delta, dispatch_result, on_dispatched);
}
MOCK_METHOD1(invalidate_cache, bool(Context*));
MOCK_METHOD1(reset_existence_cache, bool(Context*));
MOCK_METHOD5(extent_overwritten, void(uint64_t, uint64_t, uint64_t, uint64_t,
uint64_t));
MOCK_METHOD2(prepare_copyup, int(uint64_t, SnapshotSparseBufferlist*));
};
} // namespace io
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_IO_OBJECT_DISPATCH_H
| 6,074 | 43.021739 | 80 | h |
null | ceph-main/src/test/librbd/mock/io/MockObjectDispatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_IO_OBJECT_DISPATCHER_H
#define CEPH_TEST_LIBRBD_MOCK_IO_OBJECT_DISPATCHER_H
#include "gmock/gmock.h"
#include "include/Context.h"
#include "librbd/io/ObjectDispatcher.h"
#include "librbd/io/ObjectDispatchSpec.h"
#include "librbd/io/Types.h"
class Context;
namespace librbd {
namespace io {
struct ObjectDispatchInterface;
struct MockObjectDispatcher : public ObjectDispatcherInterface {
public:
MOCK_METHOD1(shut_down, void(Context*));
MOCK_METHOD1(register_dispatch, void(ObjectDispatchInterface*));
MOCK_METHOD1(exists, bool(ObjectDispatchLayer));
MOCK_METHOD2(shut_down_dispatch, void(ObjectDispatchLayer, Context*));
MOCK_METHOD2(flush, void(FlushSource, Context*));
MOCK_METHOD1(invalidate_cache, void(Context*));
MOCK_METHOD1(reset_existence_cache, void(Context*));
MOCK_METHOD5(extent_overwritten, void(uint64_t, uint64_t, uint64_t, uint64_t,
uint64_t));
MOCK_METHOD2(prepare_copyup, int(uint64_t, SnapshotSparseBufferlist*));
MOCK_METHOD1(send, void(ObjectDispatchSpec*));
};
} // namespace io
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_IO_OBJECT_DISPATCHER_H
| 1,281 | 27.488889 | 79 | h |
null | ceph-main/src/test/librbd/mock/io/MockQosImageDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_IO_QOS_IMAGE_DISPATCH_H
#define CEPH_TEST_LIBRBD_MOCK_IO_QOS_IMAGE_DISPATCH_H
#include "gmock/gmock.h"
#include "librbd/io/Types.h"
#include <atomic>
struct Context;
namespace librbd {
namespace io {
struct MockQosImageDispatch {
MOCK_METHOD4(needs_throttle, bool(bool, const Extents&,
std::atomic<uint32_t>*, Context*));
};
} // namespace io
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_IO_QOS_IMAGE_DISPATCH_H
| 594 | 22.8 | 71 | h |
null | ceph-main/src/test/librbd/mock/migration/MockSnapshotInterface.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_MIGRATION_MOCK_SNAPSHOT_INTERFACE_H
#define CEPH_TEST_LIBRBD_MOCK_MIGRATION_MOCK_SNAPSHOT_INTERFACE_H
#include "include/buffer.h"
#include "gmock/gmock.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Types.h"
#include "librbd/migration/SnapshotInterface.h"
namespace librbd {
namespace migration {
struct MockSnapshotInterface : public SnapshotInterface {
MOCK_METHOD2(open, void(SnapshotInterface*, Context*));
MOCK_METHOD1(close, void(Context*));
MOCK_CONST_METHOD0(get_snap_info, const SnapInfo&());
MOCK_METHOD3(read, void(io::AioCompletion*, const io::Extents&,
io::ReadResult&));
void read(io::AioCompletion* aio_comp, io::Extents&& image_extents,
io::ReadResult&& read_result, int op_flags, int read_flags,
const ZTracer::Trace &parent_trace) override {
read(aio_comp, image_extents, read_result);
}
MOCK_METHOD3(list_snap, void(const io::Extents&, io::SparseExtents*,
Context*));
void list_snap(io::Extents&& image_extents, int list_snaps_flags,
io::SparseExtents* sparse_extents,
const ZTracer::Trace &parent_trace,
Context* on_finish) override {
list_snap(image_extents, sparse_extents, on_finish);
}
};
} // namespace migration
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_MIGRATION_MOCK_SNAPSHOT_INTERFACE_H
| 1,570 | 33.911111 | 71 | h |
null | ceph-main/src/test/librbd/mock/migration/MockStreamInterface.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_LIBRBD_MOCK_MIGRATION_MOCK_STREAM_INTERFACE_H
#define CEPH_TEST_LIBRBD_MOCK_MIGRATION_MOCK_STREAM_INTERFACE_H
#include "include/buffer.h"
#include "gmock/gmock.h"
#include "librbd/migration/StreamInterface.h"
namespace librbd {
namespace migration {
struct MockStreamInterface : public StreamInterface {
MOCK_METHOD1(open, void(Context*));
MOCK_METHOD1(close, void(Context*));
MOCK_METHOD2(get_size, void(uint64_t*, Context*));
MOCK_METHOD3(read, void(const io::Extents&, bufferlist*, Context*));
void read(io::Extents&& byte_extents, bufferlist* bl, Context* on_finish) {
read(byte_extents, bl, on_finish);
}
};
} // namespace migration
} // namespace librbd
#endif // CEPH_TEST_LIBRBD_MOCK_MIGRATION_MOCK_STREAM_INTERFACE_H
| 866 | 27.9 | 77 | h |
null | ceph-main/src/test/librbd/object_map/mock/MockInvalidateRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/object_map/InvalidateRequest.h"
// template definitions
#include "librbd/object_map/InvalidateRequest.cc"
namespace librbd {
namespace object_map {
template <typename I>
struct MockInvalidateRequestBase {
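  // Tests pre-construct mock requests: each constructor pushes itself onto
  // s_requests and create() pops the next one, so send() expectations can be
  // set before the code under test asks for a request.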
static std::list<InvalidateRequest<I>*> s_requests;
uint64_t snap_id = 0;
bool force = false;
Context *on_finish = nullptr;
static InvalidateRequest<I>* create(I &image_ctx, uint64_t snap_id,
bool force, Context *on_finish) {
ceph_assert(!s_requests.empty());
InvalidateRequest<I>* req = s_requests.front();
req->snap_id = snap_id;
req->force = force;
req->on_finish = on_finish;
s_requests.pop_front();
return req;
}
MockInvalidateRequestBase() {
s_requests.push_back(static_cast<InvalidateRequest<I>*>(this));
}
MOCK_METHOD0(send, void());
};
template <typename I>
std::list<InvalidateRequest<I>*> MockInvalidateRequestBase<I>::s_requests;
} // namespace object_map
} // namespace librbd
| 1,107 | 25.380952 | 74 | h |
null | ceph-main/src/test/neorados/common_tests.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat, Inc.
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include <string>
#include <string_view>
#include "include/neorados/RADOS.hpp"
std::string get_temp_pool_name(std::string_view prefix = {});
template<typename CompletionToken>
auto create_pool(neorados::RADOS& r, std::string_view pname,
CompletionToken&& token)
{
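  // Completion signature: void(error_code, int64_t). The pool is created
  // first, then looked up, so the handler receives the new pool's id.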
boost::asio::async_completion<CompletionToken,
void(boost::system::error_code,
std::int64_t)> init(token);
r.create_pool(pname, std::nullopt,
[&r, pname = std::string(pname),
h = std::move(init.completion_handler)]
(boost::system::error_code ec) mutable {
r.lookup_pool(
pname,
[h = std::move(h)]
(boost::system::error_code ec, std::int64_t pool) mutable {
std::move(h)(ec, pool);
});
});
return init.result.get();
}
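// Usage sketch (assumes a connected neorados::RADOS handle; any completion
// token should work, e.g. Boost.Asio's use_future):
//
//   auto pool = create_pool(rados, get_temp_pool_name("test_"),
//                           boost::asio::use_future).get();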
| 1,202 | 27.642857 | 70 | h |
null | ceph-main/src/test/objectstore/ObjectStoreImitator.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Fragmentation Simulator
* Author: Tri Dao, [email protected]
*/
#pragma once
#include "include/common_fwd.h"
#include "os/ObjectStore.h"
#include "os/bluestore/Allocator.h"
#include "os/bluestore/bluestore_types.h"
#include <algorithm>
#include <boost/smart_ptr/intrusive_ptr.hpp>
/**
 * ObjectStoreImitator simulates how BlueStore does I/O (as of the time the
 * simulator was written) and assesses the fragmentation levels produced by
 * different allocators. As the main concern of the simulator is allocators, it
 * focuses on operations that trigger I/O and simplifies the rest as much as
 * possible (caches, memory buffers).
 *
 * The simulator inherits from ObjectStore and tries to mimic BlueStore as
 * closely as possible.
 *
 * Note: this is an allocation simulator, not a data consistency simulator, so
 * object data is not stored.
*/
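//
// A minimal driver sketch (hypothetical, not part of this header; the
// allocator name, sizes, cid/oid/bl and the transaction contents are
// illustrative only):
//
//   ObjectStoreImitator store(cct, "", 4096 /* min_alloc_size */);
//   store.init_alloc("bitmap", 1ull << 30);
//   auto ch = store.create_new_collection(cid);
//   std::vector<ObjectStore::Transaction> tls(1);
//   tls[0].create_collection(cid, 0);
//   tls[0].write(cid, oid, 0, bl.length(), bl);
//   store.queue_transactions(ch, tls);
//   store.verify_objects(ch);
//   store.print_status();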
class ObjectStoreImitator : public ObjectStore {
private:
class Collection;
typedef boost::intrusive_ptr<Collection> CollectionRef;
struct Object : public RefCountedObject {
Collection *c;
ghobject_t oid;
bool exists;
uint64_t nid;
uint64_t size;
uint32_t alloc_hint_flags = 0;
uint32_t expected_object_size = 0;
uint32_t expected_write_size = 0;
    // We assume these extents are sorted in "logical" (object offset) order.
PExtentVector extents;
Object(Collection *c_, const ghobject_t &oid_, bool exists_ = false,
uint64_t nid_ = 0, uint64_t size_ = 0)
: c(c_), oid(oid_), exists(exists_), nid(nid_), size(size_) {}
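    // Cut [offset, offset+length) out of this object: the removed physical
    // extents are returned through old_extents and the logical size shrinks
    // by the punched amount.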
void punch_hole(uint64_t offset, uint64_t length,
PExtentVector &old_extents) {
if (offset >= size || length == 0)
return;
if (offset + length >= size) {
length = size - offset;
}
uint64_t l_offset{0}, punched_length{0};
PExtentVector to_be_punched, remains;
for (auto e : extents) {
if (l_offset > offset && l_offset - length >= offset)
break;
// Found where we need to punch
if (l_offset >= offset) {
// We only punched a portion of the extent
if (e.length + punched_length > length) {
uint64_t left = e.length + punched_length - length;
e.length = length - punched_length;
remains.emplace_back(e.offset + e.length, left);
}
to_be_punched.push_back(e);
punched_length += e.length;
} else { // else the extent will remain
remains.push_back(e);
}
l_offset += e.length;
}
size -= punched_length;
extents = remains;
old_extents = to_be_punched;
}
void append(PExtentVector &ext) {
for (auto &e : ext) {
extents.push_back(e);
size += e.length;
}
std::sort(extents.begin(), extents.end(),
[](bluestore_pextent_t &a, bluestore_pextent_t &b) {
return a.offset < b.offset;
});
}
void verify_extents() {
uint64_t total{0};
for (auto &e : extents) {
ceph_assert(total <= e.offset);
ceph_assert(e.length > 0);
total += e.length;
}
ceph_assert(total == size);
}
};
typedef boost::intrusive_ptr<Object> ObjectRef;
struct Collection : public CollectionImpl {
bluestore_cnode_t cnode;
std::map<ghobject_t, ObjectRef> objects;
ceph::shared_mutex lock = ceph::make_shared_mutex(
"FragmentationSimulator::Collection::lock", true, false);
// Lock for 'objects'
ceph::recursive_mutex obj_lock = ceph::make_recursive_mutex(
"FragmentationSimulator::Collection::obj_lock");
bool exists;
// pool options
pool_opts_t pool_opts;
ContextQueue *commit_queue;
bool contains(const ghobject_t &oid) {
if (cid.is_meta())
return oid.hobj.pool == -1;
spg_t spgid;
if (cid.is_pg(&spgid))
return spgid.pgid.contains(cnode.bits, oid) &&
oid.shard_id == spgid.shard;
return false;
}
int64_t pool() const { return cid.pool(); }
ObjectRef get_obj(const ghobject_t &oid, bool create) {
ceph_assert(create ? ceph_mutex_is_wlocked(lock)
: ceph_mutex_is_locked(lock));
spg_t pgid;
if (cid.is_pg(&pgid) && !oid.match(cnode.bits, pgid.ps())) {
ceph_abort();
}
auto o = objects.find(oid);
if (o != objects.end())
return o->second;
if (!create)
return nullptr;
return objects[oid] = new Object(this, oid);
}
bool flush_commit(Context *c) override { return false; }
void flush() override {}
void rename_obj(ObjectRef &oldo, const ghobject_t &old_oid,
const ghobject_t &new_oid) {
std::lock_guard l(obj_lock);
auto po = objects.find(old_oid);
auto pn = objects.find(new_oid);
ceph_assert(po != pn);
ceph_assert(po != objects.end());
if (pn != objects.end()) {
objects.erase(pn);
}
ObjectRef o = po->second;
oldo.reset(new Object(o->c, old_oid));
po->second = oldo;
objects.insert(std::make_pair(new_oid, o));
o->oid = new_oid;
}
void verify_objects() {
for (auto &[_, obj] : objects) {
obj->verify_extents();
}
}
Collection(ObjectStoreImitator *sim_, coll_t cid_)
: CollectionImpl(sim_->cct, cid_), exists(true), commit_queue(nullptr) {
}
};
CollectionRef _get_collection(const coll_t &cid);
int _split_collection(CollectionRef &c, CollectionRef &d, unsigned bits,
int rem);
int _merge_collection(CollectionRef *c, CollectionRef &d, unsigned bits);
int _collection_list(Collection *c, const ghobject_t &start,
const ghobject_t &end, int max, bool legacy,
std::vector<ghobject_t> *ls, ghobject_t *next);
int _remove_collection(const coll_t &cid, CollectionRef *c);
void _do_remove_collection(CollectionRef *c);
int _create_collection(const coll_t &cid, unsigned bits, CollectionRef *c);
// Transactions
void _add_transaction(Transaction *t);
// Object ops
int _write(CollectionRef &c, ObjectRef &o, uint64_t offset, size_t length,
bufferlist &bl, uint32_t fadvise_flags);
int _set_alloc_hint(CollectionRef &c, ObjectRef &o,
uint64_t expected_object_size,
uint64_t expected_write_size, uint32_t flags);
int _rename(CollectionRef &c, ObjectRef &oldo, ObjectRef &newo,
const ghobject_t &new_oid);
int _clone(CollectionRef &c, ObjectRef &oldo, ObjectRef &newo);
int _clone_range(CollectionRef &c, ObjectRef &oldo, ObjectRef &newo,
uint64_t srcoff, uint64_t length, uint64_t dstoff);
int read(CollectionHandle &c, const ghobject_t &oid, uint64_t offset,
size_t len, ceph::buffer::list &bl, uint32_t op_flags = 0) override;
// Helpers
void _assign_nid(ObjectRef &o);
int _do_write(CollectionRef &c, ObjectRef &o, uint64_t offset,
uint64_t length, ceph::buffer::list &bl,
uint32_t fadvise_flags);
int _do_alloc_write(CollectionRef c, ObjectRef &o, bufferlist &bl);
void _do_truncate(CollectionRef &c, ObjectRef &o, uint64_t offset);
int _do_zero(CollectionRef &c, ObjectRef &o, uint64_t offset, size_t length);
int _do_clone_range(CollectionRef &c, ObjectRef &oldo, ObjectRef &newo,
uint64_t srcoff, uint64_t length, uint64_t dstoff);
int _do_read(Collection *c, ObjectRef &o, uint64_t offset, size_t len,
ceph::buffer::list &bl, uint32_t op_flags = 0,
uint64_t retry_count = 0);
// Members
boost::scoped_ptr<Allocator> alloc;
std::atomic<uint64_t> nid_last = {0};
uint64_t min_alloc_size; ///< minimum allocation unit (power of 2)
static_assert(std::numeric_limits<uint8_t>::max() >
std::numeric_limits<decltype(min_alloc_size)>::digits,
"not enough bits for min_alloc_size");
///< rwlock to protect coll_map/new_coll_map
ceph::shared_mutex coll_lock =
ceph::make_shared_mutex("FragmentationSimulator::coll_lock");
std::unordered_map<coll_t, CollectionRef> coll_map;
std::unordered_map<coll_t, CollectionRef>
      new_coll_map; // collections opened via create_new_collection whose
                    // create txn has not yet been executed
public:
ObjectStoreImitator(CephContext *cct, const std::string &path_,
uint64_t min_alloc_size_)
: ObjectStore(cct, path_), alloc(nullptr),
min_alloc_size(min_alloc_size_) {}
~ObjectStoreImitator() = default;
void init_alloc(const std::string &alloc_type, uint64_t size);
void print_status();
void verify_objects(CollectionHandle &ch);
// Overrides
// This is often not called directly but through queue_transaction
int queue_transactions(CollectionHandle &ch, std::vector<Transaction> &tls,
TrackedOpRef op = TrackedOpRef(),
ThreadPool::TPHandle *handle = NULL) override;
CollectionHandle open_collection(const coll_t &cid) override;
CollectionHandle create_new_collection(const coll_t &cid) override;
void set_collection_commit_queue(const coll_t &cid,
ContextQueue *commit_queue) override;
bool exists(CollectionHandle &c, const ghobject_t &old) override;
int set_collection_opts(CollectionHandle &c,
const pool_opts_t &opts) override;
int list_collections(std::vector<coll_t> &ls) override;
bool collection_exists(const coll_t &c) override;
int collection_empty(CollectionHandle &c, bool *empty) override;
int collection_bits(CollectionHandle &c) override;
int collection_list(CollectionHandle &c, const ghobject_t &start,
const ghobject_t &end, int max,
std::vector<ghobject_t> *ls, ghobject_t *next) override;
// Not used but implemented so it compiles
std::string get_type() override { return "ObjectStoreImitator"; }
bool test_mount_in_use() override { return false; }
int mount() override { return 0; }
int umount() override { return 0; }
int validate_hobject_key(const hobject_t &obj) const override { return 0; }
unsigned get_max_attr_name_length() override { return 256; }
int mkfs() override { return 0; }
int mkjournal() override { return 0; }
bool needs_journal() override { return false; }
bool wants_journal() override { return false; }
bool allows_journal() override { return false; }
int statfs(struct store_statfs_t *buf,
osd_alert_list_t *alerts = nullptr) override {
return 0;
}
int pool_statfs(uint64_t pool_id, struct store_statfs_t *buf,
bool *per_pool_omap) override {
return 0;
}
int stat(CollectionHandle &c, const ghobject_t &oid, struct stat *st,
bool allow_eio = false) override {
return 0;
}
int fiemap(CollectionHandle &c, const ghobject_t &oid, uint64_t offset,
size_t len, ceph::buffer::list &bl) override {
return 0;
}
int fiemap(CollectionHandle &c, const ghobject_t &oid, uint64_t offset,
size_t len, std::map<uint64_t, uint64_t> &destmap) override {
return 0;
}
int getattr(CollectionHandle &c, const ghobject_t &oid, const char *name,
ceph::buffer::ptr &value) override {
return 0;
}
int getattrs(
CollectionHandle &c, const ghobject_t &oid,
std::map<std::string, ceph::buffer::ptr, std::less<>> &aset) override {
return 0;
}
int omap_get(CollectionHandle &c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
ceph::buffer::list *header, ///< [out] omap header
std::map<std::string, ceph::buffer::list>
                   *out ///< [out] Key-to-value map
) override {
return 0;
}
int omap_get_header(CollectionHandle &c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
ceph::buffer::list *header, ///< [out] omap header
bool allow_eio = false ///< [in] don't assert on eio
) override {
return 0;
}
int omap_get_keys(CollectionHandle &c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
std::set<std::string> *keys ///< [out] Keys defined on oid
) override {
return 0;
}
int omap_get_values(CollectionHandle &c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
const std::set<std::string> &keys, ///< [in] Keys to get
std::map<std::string, ceph::buffer::list>
*out ///< [out] Returned keys and values
) override {
return 0;
}
int omap_check_keys(
CollectionHandle &c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
const std::set<std::string> &keys, ///< [in] Keys to check
std::set<std::string> *out ///< [out] Subset of keys defined on oid
) override {
return 0;
}
ObjectMap::ObjectMapIterator
get_omap_iterator(CollectionHandle &c, ///< [in] collection
const ghobject_t &oid ///< [in] object
) override {
return {};
}
void set_fsid(uuid_d u) override {}
uuid_d get_fsid() override { return {}; }
uint64_t estimate_objects_overhead(uint64_t num_objects) override {
return num_objects * 300;
}
objectstore_perf_stat_t get_cur_stats() override { return {}; }
const PerfCounters *get_perf_counters() const override { return nullptr; };
};
| 14,067 | 35.54026 | 80 | h |
null | ceph-main/src/test/objectstore/TestObjectStoreState.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef TEST_OBJECTSTORE_STATE_H_
#define TEST_OBJECTSTORE_STATE_H_
#include <boost/scoped_ptr.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_int.hpp>
#include <map>
#include <vector>
#include "os/ObjectStore.h"
#include "common/Cond.h"
typedef boost::mt11213b rngen_t;
class TestObjectStoreState {
public:
struct coll_entry_t {
spg_t m_pgid;
coll_t m_cid;
ghobject_t m_meta_obj;
ObjectStore::CollectionHandle m_ch;
std::map<int, hobject_t*> m_objects;
int m_next_object_id;
coll_entry_t(spg_t pgid, ObjectStore::CollectionHandle& ch,
char *meta_obj_buf)
: m_pgid(pgid),
m_cid(m_pgid),
m_meta_obj(hobject_t(sobject_t(object_t(meta_obj_buf), CEPH_NOSNAP))),
m_ch(ch),
m_next_object_id(0) {
m_meta_obj.hobj.pool = m_pgid.pool();
m_meta_obj.hobj.set_hash(m_pgid.ps());
}
~coll_entry_t();
hobject_t *touch_obj(int id);
bool check_for_obj(int id);
hobject_t *get_obj(int id);
hobject_t *remove_obj(int id);
hobject_t *get_obj_at(int pos, int *key = NULL);
hobject_t *remove_obj_at(int pos, int *key = NULL);
hobject_t *replace_obj(int id, hobject_t *obj);
int get_random_obj_id(rngen_t& gen);
private:
hobject_t *get_obj(int id, bool remove);
hobject_t *get_obj_at(int pos, bool remove, int *key = NULL);
};
protected:
boost::shared_ptr<ObjectStore> m_store;
std::map<coll_t, coll_entry_t*> m_collections;
std::vector<coll_t> m_collections_ids;
int m_next_coll_nr;
int m_num_objs_per_coll;
int m_num_objects;
int m_max_in_flight;
std::atomic<int> m_in_flight = { 0 };
ceph::mutex m_finished_lock = ceph::make_mutex("Finished Lock");
ceph::condition_variable m_finished_cond;
void rebuild_id_vec() {
m_collections_ids.clear();
m_collections_ids.reserve(m_collections.size());
for (auto& i : m_collections) {
m_collections_ids.push_back(i.first);
}
}
void wait_for_ready() {
std::unique_lock locker{m_finished_lock};
m_finished_cond.wait(locker, [this] {
return m_max_in_flight <= 0 || m_in_flight < m_max_in_flight;
});
}
void wait_for_done() {
std::unique_lock locker{m_finished_lock};
m_finished_cond.wait(locker, [this] { return m_in_flight == 0; });
}
void set_max_in_flight(int max) {
m_max_in_flight = max;
}
void set_num_objs_per_coll(int val) {
m_num_objs_per_coll = val;
}
coll_entry_t *get_coll(coll_t cid, bool erase = false);
coll_entry_t *get_coll_at(int pos, bool erase = false);
int get_next_pool_id() { return m_next_pool++; }
private:
static const int m_default_num_colls = 30;
  // The pool ID used for collection creation; ID 0 is reserved for other tests
int m_next_pool;
public:
explicit TestObjectStoreState(ObjectStore *store) :
m_next_coll_nr(0), m_num_objs_per_coll(10), m_num_objects(0),
m_max_in_flight(0), m_next_pool(2) {
m_store.reset(store);
}
~TestObjectStoreState() {
auto it = m_collections.begin();
while (it != m_collections.end()) {
if (it->second)
delete it->second;
m_collections.erase(it++);
}
}
void init(int colls, int objs);
void init() {
init(m_default_num_colls, 0);
}
int inc_in_flight() {
return ++m_in_flight;
}
int dec_in_flight() {
return --m_in_flight;
}
coll_entry_t *coll_create(spg_t pgid, ObjectStore::CollectionHandle ch);
class C_OnFinished: public Context {
protected:
TestObjectStoreState *m_state;
public:
explicit C_OnFinished(TestObjectStoreState *state) : m_state(state) { }
void finish(int r) override {
std::lock_guard locker{m_state->m_finished_lock};
m_state->dec_in_flight();
m_state->m_finished_cond.notify_all();
}
};
};
#endif /* TEST_OBJECTSTORE_STATE_H_ */
| 4,213 | 25.503145 | 79 | h |
null | ceph-main/src/test/objectstore/store_test_fixture.h | #include <string>
#include <stack>
#include <memory>
#include <gtest/gtest.h>
#include "common/config_fwd.h"
class ObjectStore;
class StoreTestFixture : virtual public ::testing::Test {
const std::string type;
const std::string data_dir;
std::stack<std::pair<std::string, std::string>> saved_settings;
ConfigProxy* conf = nullptr;
std::string orig_death_test_style;
public:
std::unique_ptr<ObjectStore> store;
ObjectStore::CollectionHandle ch;
explicit StoreTestFixture(const std::string& type)
: type(type), data_dir(type + ".test_temp_dir")
{}
void SetUp() override;
void TearDown() override;
void SetDeathTestStyle(const char* new_style) {
if (orig_death_test_style.empty()) {
orig_death_test_style = ::testing::FLAGS_gtest_death_test_style;
}
::testing::FLAGS_gtest_death_test_style = new_style;
}
void SetVal(ConfigProxy& conf, const char* key, const char* val);
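  // RAII bookmark: remembers the current depth of saved_settings and, when it
  // goes out of scope, pops (restores) every setting pushed after it.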
struct SettingsBookmark {
StoreTestFixture& s;
size_t pos;
SettingsBookmark(StoreTestFixture& _s, size_t p) : s(_s), pos(p)
{}
~SettingsBookmark() {
s.PopSettings(pos);
}
};
SettingsBookmark BookmarkSettings() {
return SettingsBookmark(*this, saved_settings.size());
}
void PopSettings(size_t);
void CloseAndReopen();
};
| 1,295 | 23.45283 | 70 | h |
null | ceph-main/src/test/osd/Object.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
#include "include/interval_set.h"
#include "include/buffer.h"
#include "include/encoding.h"
#include <list>
#include <map>
#include <set>
#include <stack>
#include <random>
#ifndef OBJECT_H
#define OBJECT_H
/// describes an object
class ContDesc {
public:
int objnum;
int cursnap;
unsigned seqnum;
std::string prefix;
std::string oid;
ContDesc() :
objnum(0), cursnap(0),
seqnum(0), prefix("") {}
ContDesc(int objnum,
int cursnap,
unsigned seqnum,
const std::string &prefix) :
objnum(objnum), cursnap(cursnap),
seqnum(seqnum), prefix(prefix) {}
bool operator==(const ContDesc &rhs) {
return (rhs.objnum == objnum &&
rhs.cursnap == cursnap &&
rhs.seqnum == seqnum &&
rhs.prefix == prefix &&
rhs.oid == oid);
}
bool operator<(const ContDesc &rhs) const {
return seqnum < rhs.seqnum;
}
bool operator!=(const ContDesc &rhs) {
return !((*this) == rhs);
}
void encode(bufferlist &bl) const;
void decode(bufferlist::const_iterator &bp);
};
WRITE_CLASS_ENCODER(ContDesc)
std::ostream &operator<<(std::ostream &out, const ContDesc &rhs);
class ChunkDesc {
public:
uint32_t offset;
uint32_t length;
std::string oid;
};
class ContentsGenerator {
public:
class iterator_impl {
public:
virtual char operator*() = 0;
virtual iterator_impl &operator++() = 0;
virtual void seek(uint64_t pos) = 0;
virtual bool end() = 0;
virtual ContDesc get_cont() const = 0;
virtual uint64_t get_pos() const = 0;
virtual bufferlist gen_bl_advance(uint64_t s) {
bufferptr ret = buffer::create(s);
for (uint64_t i = 0; i < s; ++i, ++(*this)) {
ret[i] = **this;
}
bufferlist _ret;
_ret.push_back(ret);
return _ret;
}
    /// walk through the given @c bl
    ///
    /// @param[out] off the offset of the first byte which does not match
    /// @returns true if @c bl matches the content, false otherwise
virtual bool check_bl_advance(bufferlist &bl, uint64_t *off = nullptr) {
uint64_t _off = 0;
for (bufferlist::iterator i = bl.begin();
!i.end();
++i, ++_off, ++(*this)) {
if (*i != **this) {
if (off)
*off = _off;
return false;
}
}
return true;
}
virtual ~iterator_impl() {};
};
class iterator {
public:
ContentsGenerator *parent;
iterator_impl *impl;
char operator *() { return **impl; }
iterator &operator++() { ++(*impl); return *this; };
void seek(uint64_t pos) { impl->seek(pos); }
bool end() { return impl->end(); }
~iterator() { parent->put_iterator_impl(impl); }
iterator(const iterator &rhs) : parent(rhs.parent) {
impl = parent->dup_iterator_impl(rhs.impl);
}
iterator &operator=(const iterator &rhs) {
iterator new_iter(rhs);
swap(new_iter);
return *this;
}
void swap(iterator &other) {
ContentsGenerator *otherparent = other.parent;
other.parent = parent;
parent = otherparent;
iterator_impl *otherimpl = other.impl;
other.impl = impl;
impl = otherimpl;
}
bufferlist gen_bl_advance(uint64_t s) {
return impl->gen_bl_advance(s);
}
bool check_bl_advance(bufferlist &bl, uint64_t *off = nullptr) {
return impl->check_bl_advance(bl, off);
}
iterator(ContentsGenerator *parent, iterator_impl *impl) :
parent(parent), impl(impl) {}
};
virtual uint64_t get_length(const ContDesc &in) = 0;
virtual void get_ranges_map(
const ContDesc &cont, std::map<uint64_t, uint64_t> &out) = 0;
void get_ranges(const ContDesc &cont, interval_set<uint64_t> &out) {
std::map<uint64_t, uint64_t> ranges;
get_ranges_map(cont, ranges);
for (std::map<uint64_t, uint64_t>::iterator i = ranges.begin();
i != ranges.end();
++i) {
out.insert(i->first, i->second);
}
}
virtual iterator_impl *get_iterator_impl(const ContDesc &in) = 0;
virtual iterator_impl *dup_iterator_impl(const iterator_impl *in) = 0;
virtual void put_iterator_impl(iterator_impl *in) = 0;
virtual ~ContentsGenerator() {};
iterator get_iterator(const ContDesc &in) {
return iterator(this, get_iterator_impl(in));
}
};
class RandGenerator : public ContentsGenerator {
public:
typedef std::minstd_rand0 RandWrap;
class iterator_impl : public ContentsGenerator::iterator_impl {
public:
uint64_t pos;
ContDesc cont;
RandWrap rand;
RandGenerator *cont_gen;
char current;
iterator_impl(const ContDesc &cont, RandGenerator *cont_gen) :
pos(0), cont(cont), rand(cont.seqnum), cont_gen(cont_gen) {
current = rand();
}
ContDesc get_cont() const override { return cont; }
uint64_t get_pos() const override { return pos; }
iterator_impl &operator++() override {
pos++;
current = rand();
return *this;
}
char operator*() override {
return current;
}
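    // The PRNG stream only advances forward, so seeking backwards restarts
    // from a fresh iterator and then steps ahead to the requested position.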
void seek(uint64_t _pos) override {
if (_pos < pos) {
iterator_impl begin = iterator_impl(cont, cont_gen);
begin.seek(_pos);
*this = begin;
}
while (pos < _pos) {
++(*this);
}
}
bool end() override {
return pos >= cont_gen->get_length(cont);
}
};
ContentsGenerator::iterator_impl *get_iterator_impl(const ContDesc &in) override {
RandGenerator::iterator_impl *i = new iterator_impl(in, this);
return i;
}
void put_iterator_impl(ContentsGenerator::iterator_impl *in) override {
delete in;
}
ContentsGenerator::iterator_impl *dup_iterator_impl(
const ContentsGenerator::iterator_impl *in) override {
ContentsGenerator::iterator_impl *retval = get_iterator_impl(in->get_cont());
retval->seek(in->get_pos());
return retval;
}
};
class VarLenGenerator : public RandGenerator {
uint64_t max_length;
uint64_t min_stride_size;
uint64_t max_stride_size;
public:
VarLenGenerator(
uint64_t length, uint64_t min_stride_size, uint64_t max_stride_size) :
max_length(length),
min_stride_size(min_stride_size),
max_stride_size(max_stride_size) {}
void get_ranges_map(
const ContDesc &cont, std::map<uint64_t, uint64_t> &out) override;
uint64_t get_length(const ContDesc &in) override {
RandWrap rand(in.seqnum);
if (max_length == 0)
return 0;
return (rand() % (max_length/2)) + ((max_length - 1)/2) + 1;
}
};
class AttrGenerator : public RandGenerator {
uint64_t max_len;
uint64_t big_max_len;
public:
AttrGenerator(uint64_t max_len, uint64_t big_max_len)
: max_len(max_len), big_max_len(big_max_len) {}
void get_ranges_map(
const ContDesc &cont, std::map<uint64_t, uint64_t> &out) override {
out.insert(std::pair<uint64_t, uint64_t>(0, get_length(cont)));
}
uint64_t get_length(const ContDesc &in) override {
RandWrap rand(in.seqnum);
// make some attrs big
if (in.seqnum & 3)
return (rand() % max_len);
else
return (rand() % big_max_len);
}
bufferlist gen_bl(const ContDesc &in) {
bufferlist bl;
for (iterator i = get_iterator(in); !i.end(); ++i) {
bl.append(*i);
}
ceph_assert(bl.length() < big_max_len);
return bl;
}
};
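// Note: in AttrGenerator::get_length(), the (in.seqnum & 3) test means only
// seqnums divisible by 4 take the big_max_len branch - so roughly one in four
// attribute values is deliberately large, while the rest stay under max_len.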
class AppendGenerator : public RandGenerator {
uint64_t off;
uint64_t alignment;
uint64_t min_append_size;
uint64_t max_append_size;
uint64_t max_append_total;
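  // rounds `in` up to the next multiple of `by`; note that an already-aligned
  // value is still advanced by a full `by` (e.g. round_up(8, 4) == 12)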
uint64_t round_up(uint64_t in, uint64_t by) {
if (by)
in += (by - (in % by));
return in;
}
public:
AppendGenerator(
uint64_t off,
uint64_t alignment,
uint64_t min_append_size,
uint64_t _max_append_size,
uint64_t max_append_multiple) :
off(off), alignment(alignment),
min_append_size(round_up(min_append_size, alignment)),
max_append_size(round_up(_max_append_size, alignment)) {
if (_max_append_size == min_append_size)
max_append_size += alignment;
max_append_total = max_append_multiple * max_append_size;
}
uint64_t get_append_size(const ContDesc &in) {
RandWrap rand(in.seqnum);
return round_up(rand() % max_append_total, alignment);
}
uint64_t get_length(const ContDesc &in) override {
return off + get_append_size(in);
}
void get_ranges_map(
const ContDesc &cont, std::map<uint64_t, uint64_t> &out) override;
};
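// Note: AppendGenerator::get_append_size() draws an aligned size bounded
// (before rounding) by max_append_total = max_append_multiple * max_append_size,
// and get_length() is simply the starting offset plus that append size.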
class ObjectDesc {
public:
ObjectDesc()
: exists(false), dirty(false),
version(0), flushed(false) {}
ObjectDesc(const ContDesc &init, ContentsGenerator *cont_gen)
: exists(false), dirty(false),
version(0), flushed(false) {
layers.push_front(std::pair<std::shared_ptr<ContentsGenerator>, ContDesc>(std::shared_ptr<ContentsGenerator>(cont_gen), init));
}
class iterator {
public:
uint64_t pos;
uint64_t size;
uint64_t cur_valid_till;
class ContState {
interval_set<uint64_t> ranges;
const uint64_t size;
public:
ContDesc cont;
std::shared_ptr<ContentsGenerator> gen;
ContentsGenerator::iterator iter;
ContState(
const ContDesc &_cont,
std::shared_ptr<ContentsGenerator> _gen,
ContentsGenerator::iterator _iter)
: size(_gen->get_length(_cont)), cont(_cont), gen(_gen), iter(_iter) {
gen->get_ranges(cont, ranges);
}
const interval_set<uint64_t> &get_ranges() {
return ranges;
}
uint64_t get_size() {
return gen->get_length(cont);
}
bool covers(uint64_t pos) {
return ranges.contains(pos) || (!ranges.starts_after(pos) && pos >= size);
}
uint64_t next(uint64_t pos) {
ceph_assert(!covers(pos));
return ranges.starts_after(pos) ? ranges.start_after(pos) : size;
}
uint64_t valid_till(uint64_t pos) {
ceph_assert(covers(pos));
return ranges.contains(pos) ?
ranges.end_after(pos) :
std::numeric_limits<uint64_t>::max();
}
};
// from latest to earliest
using layers_t = std::vector<ContState>;
layers_t layers;
struct StackState {
const uint64_t next;
const uint64_t size;
};
std::stack<std::pair<layers_t::iterator, StackState> > stack;
layers_t::iterator current;
explicit iterator(ObjectDesc &obj) :
pos(0),
size(obj.layers.begin()->first->get_length(obj.layers.begin()->second)),
cur_valid_till(0) {
for (auto &&i : obj.layers) {
layers.push_back({i.second, i.first, i.first->get_iterator(i.second)});
}
current = layers.begin();
adjust_stack();
}
void adjust_stack();
iterator &operator++() {
ceph_assert(cur_valid_till >= pos);
++pos;
if (pos >= cur_valid_till) {
adjust_stack();
}
return *this;
}
char operator*() {
if (current == layers.end()) {
return '\0';
} else {
return pos >= size ? '\0' : *(current->iter);
}
}
bool end() {
return pos >= size;
}
// advance @c pos to given position
void seek(uint64_t _pos) {
if (_pos < pos) {
ceph_abort();
}
while (pos < _pos) {
ceph_assert(cur_valid_till >= pos);
uint64_t next = std::min(_pos - pos, cur_valid_till - pos);
pos += next;
if (pos >= cur_valid_till) {
ceph_assert(pos == cur_valid_till);
adjust_stack();
}
}
ceph_assert(pos == _pos);
}
// grab the bytes in the range of [pos, pos+s), and advance @c pos
//
// @returns the bytes in the specified range
bufferlist gen_bl_advance(uint64_t s) {
bufferlist ret;
while (s > 0) {
ceph_assert(cur_valid_till >= pos);
uint64_t next = std::min(s, cur_valid_till - pos);
if (current != layers.end() && pos < size) {
ret.append(current->iter.gen_bl_advance(next));
} else {
ret.append_zero(next);
}
pos += next;
ceph_assert(next <= s);
s -= next;
if (pos >= cur_valid_till) {
ceph_assert(cur_valid_till == pos);
adjust_stack();
}
}
return ret;
}
    // compare the range [pos, pos+bl.length()) with the given @c bl, advancing
    // @c pos over the chunks that match
//
// @param error_at the offset of the first byte which does not match
// @returns true if all bytes match, false otherwise
bool check_bl_advance(bufferlist &bl, uint64_t *error_at = nullptr) {
uint64_t off = 0;
while (off < bl.length()) {
ceph_assert(cur_valid_till >= pos);
uint64_t next = std::min(bl.length() - off, cur_valid_till - pos);
bufferlist to_check;
to_check.substr_of(bl, off, next);
if (current != layers.end() && pos < size) {
if (!current->iter.check_bl_advance(to_check, error_at)) {
if (error_at)
*error_at += off;
return false;
}
} else {
uint64_t at = pos;
for (auto i = to_check.begin(); !i.end(); ++i, ++at) {
if (*i) {
if (error_at)
*error_at = at;
return false;
}
}
}
pos += next;
off += next;
ceph_assert(off <= bl.length());
if (pos >= cur_valid_till) {
ceph_assert(cur_valid_till == pos);
adjust_stack();
}
}
ceph_assert(off == bl.length());
return true;
}
};
iterator begin() {
return iterator(*this);
}
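  // Illustrative sketch (not part of the original interface): reading back the
  // expected bytes of an object at an arbitrary offset, e.g.
  //   ObjectDesc obj(init_cont, gen);  // 'init_cont' and 'gen' are placeholders
  //   auto it = obj.begin();
  //   it.seek(off);
  //   bufferlist expected = it.gen_bl_advance(len);
  // check_bl_advance() can likewise be used to verify bytes actually read.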
bool deleted() {
return !exists;
}
bool has_contents() {
return layers.size();
}
// takes ownership of gen
void update(ContentsGenerator *gen, const ContDesc &next);
bool check(bufferlist &to_check);
bool check_sparse(const std::map<uint64_t, uint64_t>& extends,
bufferlist &to_check);
const ContDesc &most_recent();
ContentsGenerator *most_recent_gen() {
return layers.begin()->first.get();
}
std::map<std::string, ContDesc> attrs; // Both omap and xattrs
bufferlist header;
bool exists;
bool dirty;
uint64_t version;
std::string redirect_target;
std::map<uint64_t, ChunkDesc> chunk_info;
bool flushed;
private:
std::list<std::pair<std::shared_ptr<ContentsGenerator>, ContDesc> > layers;
};
#endif
| 13,817 | 24.54159 | 131 | h |
null | ceph-main/src/test/osd/TestOpStat.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
#include "common/ceph_mutex.h"
#include "common/Cond.h"
#include "include/rados/librados.hpp"
#ifndef TESTOPSTAT_H
#define TESTOPSTAT_H
class TestOp;
class TestOpStat {
public:
mutable ceph::mutex stat_lock = ceph::make_mutex("TestOpStat lock");
TestOpStat() = default;
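  // current wall-clock time, in microseconds since the epoch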
static uint64_t gettime()
{
timeval t;
gettimeofday(&t,0);
return (1000000*t.tv_sec) + t.tv_usec;
}
class TypeStatus {
public:
std::map<TestOp*,uint64_t> inflight;
std::multiset<uint64_t> latencies;
void begin(TestOp *in)
{
ceph_assert(!inflight.count(in));
inflight[in] = gettime();
}
void end(TestOp *in)
{
ceph_assert(inflight.count(in));
uint64_t curtime = gettime();
latencies.insert(curtime - inflight[in]);
inflight.erase(in);
}
void export_latencies(std::map<double,uint64_t> &in) const;
};
std::map<std::string,TypeStatus> stats;
void begin(TestOp *in);
void end(TestOp *in);
friend std::ostream & operator<<(std::ostream &, const TestOpStat &);
};
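// Illustrative usage (sketch only): an op calls stat.begin(this) when it is
// issued and stat.end(this) when it completes; the per-type latency summaries
// collected in 'stats' can then be printed via the operator<< declared below.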
std::ostream & operator<<(std::ostream &out, const TestOpStat &rhs);
#endif
| 1,199 | 21.222222 | 71 | h |
null | ceph-main/src/test/osd/scrubber_generators.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
/// \file generating scrub-related maps & objects for unit tests
#include <functional>
#include <map>
#include <sstream>
#include <string>
#include <variant>
#include <vector>
#include "include/buffer.h"
#include "include/buffer_raw.h"
#include "include/object_fmt.h"
#include "osd/osd_types_fmt.h"
#include "osd/scrubber/pg_scrubber.h"
namespace ScrubGenerator {
/// \todo enhance the MockLog to capture the log messages
class MockLog : public LoggerSinkSet {
public:
void debug(std::stringstream& s) final
{
std::cout << "\n<<debug>> " << s.str() << std::endl;
}
void info(std::stringstream& s) final
{
std::cout << "\n<<info>> " << s.str() << std::endl;
}
void sec(std::stringstream& s) final
{
std::cout << "\n<<sec>> " << s.str() << std::endl;
}
void warn(std::stringstream& s) final
{
std::cout << "\n<<warn>> " << s.str() << std::endl;
}
void error(std::stringstream& s) final
{
err_count++;
std::cout << "\n<<error>> " << s.str() << std::endl;
}
OstreamTemp info() final { return OstreamTemp(CLOG_INFO, this); }
OstreamTemp warn() final { return OstreamTemp(CLOG_WARN, this); }
OstreamTemp error() final { return OstreamTemp(CLOG_ERROR, this); }
  OstreamTemp sec() final { return OstreamTemp(CLOG_SEC, this); }
OstreamTemp debug() final { return OstreamTemp(CLOG_DEBUG, this); }
void do_log(clog_type prio, std::stringstream& ss) final
{
switch (prio) {
case CLOG_DEBUG:
debug(ss);
break;
case CLOG_INFO:
info(ss);
break;
case CLOG_SEC:
sec(ss);
break;
case CLOG_WARN:
warn(ss);
break;
case CLOG_ERROR:
default:
error(ss);
break;
}
}
void do_log(clog_type prio, const std::string& ss) final
{
switch (prio) {
case CLOG_DEBUG:
debug() << ss;
break;
case CLOG_INFO:
info() << ss;
break;
case CLOG_SEC:
sec() << ss;
break;
case CLOG_WARN:
warn() << ss;
break;
case CLOG_ERROR:
default:
error() << ss;
break;
}
}
virtual ~MockLog() {}
int err_count{0};
int expected_err_count{0};
void set_expected_err_count(int c) { expected_err_count = c; }
};
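// Note (presumed usage): only error()-level messages bump err_count; a test
// that anticipates failures can record the expected number via
// set_expected_err_count() and compare it with err_count afterwards.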
// ///////////////////////////////////////////////////////////////////////// //
// ///////////////////////////////////////////////////////////////////////// //
struct pool_conf_t {
int pg_num{3};
int pgp_num{3};
int size{3};
int min_size{3};
std::string name{"rep_pool"};
};
using attr_t = std::map<std::string, std::string>;
using all_clones_snaps_t = std::map<hobject_t, std::vector<snapid_t>>;
struct RealObj;
// a function to manipulate (i.e. corrupt) an object in a specific OSD
using CorruptFunc =
std::function<RealObj(const RealObj& s, [[maybe_unused]] int osd_num)>;
using CorruptFuncList = std::map<int, CorruptFunc>; // per OSD
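// e.g. (illustrative only) a per-OSD corruption table that leaves OSD 0's
// replica untouched:
//   CorruptFuncList no_corruption{{0, crpt_do_nothing}};
// (crpt_do_nothing is defined further down in this header)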
struct SnapsetMockData {
using CookedCloneSnaps =
std::tuple<std::map<snapid_t, uint64_t>,
std::map<snapid_t, std::vector<snapid_t>>,
std::map<snapid_t, interval_set<uint64_t>>>;
// an auxiliary function to cook the data for the SnapsetMockData
using clone_snaps_cooker = CookedCloneSnaps (*)();
snapid_t seq;
std::vector<snapid_t> snaps; // descending
std::vector<snapid_t> clones; // ascending
  std::map<snapid_t, interval_set<uint64_t>> clone_overlap; // overlap w/ next newest
std::map<snapid_t, uint64_t> clone_size;
std::map<snapid_t, std::vector<snapid_t>> clone_snaps; // descending
SnapsetMockData(snapid_t seq,
std::vector<snapid_t> snaps,
std::vector<snapid_t> clones,
std::map<snapid_t, interval_set<uint64_t>> clone_overlap,
std::map<snapid_t, uint64_t> clone_size,
std::map<snapid_t, std::vector<snapid_t>> clone_snaps)
: seq(seq)
, snaps(snaps)
, clones(clones)
, clone_overlap(clone_overlap)
, clone_size(clone_size)
, clone_snaps(clone_snaps)
{}
SnapsetMockData(snapid_t seq,
std::vector<snapid_t> snaps,
std::vector<snapid_t> clones,
clone_snaps_cooker func)
: seq{seq}
, snaps{snaps}
, clones(clones)
{
auto [clone_size_, clone_snaps_, clone_overlap_] = func();
clone_size = clone_size_;
clone_snaps = clone_snaps_;
clone_overlap = clone_overlap_;
}
SnapSet make_snapset() const
{
SnapSet ss;
ss.seq = seq;
ss.snaps = snaps;
ss.clones = clones;
ss.clone_overlap = clone_overlap;
ss.clone_size = clone_size;
ss.clone_snaps = clone_snaps;
return ss;
}
};
// an object in our "DB" - with its versioned snaps, "data" (size and hash),
// and "omap" (size and hash)
struct RealData {
// not needed at this level of "data falsification": std::byte data;
uint64_t size;
uint32_t hash;
uint32_t omap_digest;
uint32_t omap_bytes;
attr_t omap;
attr_t attrs;
};
struct RealObj {
// the ghobject - oid, version, snap, hash, pool
ghobject_t ghobj;
RealData data;
const CorruptFuncList* corrupt_funcs;
const SnapsetMockData* snapset_mock_data;
};
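// the identity "corruption": returns the object unchanged, for OSDs whose
// replica should be left clean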
static inline RealObj crpt_do_nothing(const RealObj& s, int osdn)
{
return s;
}
struct SmapEntry {
ghobject_t ghobj;
ScrubMap::object smobj;
};
ScrubGenerator::SmapEntry make_smobject(
const ScrubGenerator::RealObj& blueprint, // the whole set of versions
int osd_num);
/**
 * add the object, as seen by the given OSD, to the scrub map
 */
void add_object(ScrubMap& map, const RealObj& obj_versions, int osd_num);
struct RealObjsConf {
std::vector<RealObj> objs;
};
using RealObjsConfRef = std::unique_ptr<RealObjsConf>;
// A RealObjsConf blueprint is "developed" into the following set of per-OSD
// configurations - now carrying the correct pool ID, and with the corrupting
// functions applied to the data
using RealObjsConfList = std::map<int, RealObjsConfRef>;
RealObjsConfList make_real_objs_conf(int64_t pool_id,
const RealObjsConf& blueprint,
std::vector<int32_t> active_osds);
/**
* create the snap-ids set for all clones appearing in the head
* object's snapset (those will be injected into the scrubber's mock,
* to be used as the 'snap_mapper')
*/
all_clones_snaps_t all_clones(const RealObj& head_obj);
} // namespace ScrubGenerator
template <>
struct fmt::formatter<ScrubGenerator::RealObj> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const ScrubGenerator::RealObj& rlo, FormatContext& ctx)
{
using namespace ScrubGenerator;
return fmt::format_to(ctx.out(),
"RealObj(gh:{}, dt:{}, snaps:{})",
rlo.ghobj,
rlo.data.size,
(rlo.snapset_mock_data ? rlo.snapset_mock_data->snaps
: std::vector<snapid_t>{}));
}
};
| 6,753 | 24.29588 | 79 | h |