]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
rgw: add DAOS SAL implementation 47709/head
authorZuhair AlSader <zuhair.alsader@seagate.com>
Sun, 2 Jan 2022 23:30:52 +0000 (23:30 +0000)
committerZuhair AlSader <zuhair.alsader@seagate.com>
Wed, 7 Sep 2022 16:19:41 +0000 (12:19 -0400)
To build with DAOS backend, use -DWITH_RADOSGW_DAOS=YES cmake
option. `daos-devel` rpm should be installed beforehand.

To connect to DAOS pool, add the following configuration
parameters to ceph.conf:
```
    [client]
            ...
            rgw backend store = daos
            daos pool = tank
```

A pool could be created using the following command:
```
    dmg pool create --size=<size> <pool_name>
```

To install `daos-devel` do:
```
    sudo wget -O /etc/yum.repos.d/daos-packages.repo https://packages.daos.io/v2.0/EL8/packages/x86_64/daos_packages.repo
    sudo rpm --import https://packages.daos.io/RPM-GPG-KEY
    sudo yum install -y epel-release daos-server daos-client daos-devel
```

Co-authored-by: Walter Warniaha <walter.warniaha@seagate.com>
Signed-off-by: Zuhair AlSader <zuhair.alsader@seagate.com>
CMakeLists.txt
cmake/modules/FindDAOS.cmake [new file with mode: 0644]
src/common/options/rgw.yaml.in
src/include/config-h.in.cmake
src/rgw/CMakeLists.txt
src/rgw/rgw_common.cc
src/rgw/rgw_sal.cc
src/rgw/rgw_sal_daos.cc [new file with mode: 0644]
src/rgw/rgw_sal_daos.h [new file with mode: 0644]
src/rgw/store/daos/README.md [new file with mode: 0644]

index a0184817422fb8c6aecc32fa3febd003213c1afd..639f9d363e0177590ed394ea09864dff65283e7b 100644 (file)
@@ -435,6 +435,7 @@ option(WITH_RADOSGW_KAFKA_ENDPOINT "Rados Gateway's pubsub support for Kafka pus
 option(WITH_RADOSGW_LUA_PACKAGES "Rados Gateway's support for dynamically adding lua packagess" ON)
 option(WITH_RADOSGW_DBSTORE "DBStore backend for Rados Gateway" ON)
 option(WITH_RADOSGW_MOTR "CORTX-Motr backend for Rados Gateway" OFF)
+option(WITH_RADOSGW_DAOS "DAOS backend for RADOS Gateway" OFF)
 option(WITH_RADOSGW_SELECT_PARQUET "Support for s3 select on parquet objects" ON)
 
 option(WITH_SYSTEM_ARROW "Use system-provided arrow" OFF)
diff --git a/cmake/modules/FindDAOS.cmake b/cmake/modules/FindDAOS.cmake
new file mode 100644 (file)
index 0000000..56a435c
--- /dev/null
@@ -0,0 +1,53 @@
+# - Try to find DAOS
+# Once done this will define
+#  DAOS_FOUND - System has DAOS
+#  DAOS_INCLUDE_DIRS - The DAOS include directories
+#  DAOS_LIBRARIES - The libraries needed to use DAOS
+
+# Uncomment when DAOS provides .pc files
+#find_package(PkgConfig)
+#pkg_check_modules(PC_DAOS daos)
+
+find_path(DAOS_INCLUDE_DIR daos.h
+  HINTS ${PC_DAOS_INCLUDEDIR} ${PC_DAOS_INCLUDE_DIRS}
+  PATHS /usr/local/include /usr/include)
+
+find_path(DAOS_FS_INCLUDE_DIR daos_fs.h
+  HINTS ${PC_DAOS_INCLUDEDIR} ${PC_DAOS_INCLUDE_DIRS}
+  PATHS /usr/local/include /usr/include)
+
+find_path(DAOS_FS_INCLUDE_DIR daos_s3.h
+  HINTS ${PC_DAOS_INCLUDEDIR} ${PC_DAOS_INCLUDE_DIRS}
+  PATHS /usr/local/include /usr/include)
+
+find_library(DAOS_LIBRARY NAMES daos
+  HINTS ${PC_DAOS_LIBDIR} ${PC_DAOS_LIBRARY_DIRS}
+  PATHS /usr/local/lib64 /usr/local/lib /usr/lib64 /usr/lib)
+
+find_library(DAOS_FS_LIBRARY NAMES dfs
+  HINTS ${PC_DAOS_LIBDIR} ${PC_DAOS_LIBRARY_DIRS}
+  PATHS /usr/local/lib64 /usr/local/lib /usr/lib64 /usr/lib)
+
+find_library(DAOS_FS_LIBRARY NAMES ds3
+  HINTS ${PC_DAOS_LIBDIR} ${PC_DAOS_LIBRARY_DIRS}
+  PATHS /usr/local/lib64 /usr/local/lib /usr/lib64 /usr/lib)
+
+find_library(DAOS_UNS_LIBRARY NAMES duns
+  HINTS ${PC_DAOS_LIBDIR} ${PC_DAOS_LIBRARY_DIRS}
+  PATHS /usr/local/lib64 /usr/local/lib /usr/lib64 /usr/lib)
+
+set(DAOS_INCLUDE_DIRS ${DAOS_INCLUDE_DIR} ${DAOS_FS_INCLUDE_DIR})
+set(DAOS_LIBRARIES ${DAOS_LIBRARY} ${DAOS_FS_LIBRARY} ${DAOS_UNS_LIBRARY})
+
+include(FindPackageHandleStandardArgs)
+include_directories( ${PC_DAOS_INCLUDEDIR} )
+link_directories( ${PC_DAOS_LIBDIR} )
+
+# handle the QUIETLY and REQUIRED arguments and set DAOS_FOUND to TRUE
+# if all listed variables are TRUE
+find_package_handle_standard_args(DAOS DEFAULT_MSG
+                                  DAOS_INCLUDE_DIR DAOS_FS_INCLUDE_DIR 
+                                  DAOS_LIBRARY DAOS_FS_LIBRARY DAOS_UNS_LIBRARY)
+
+mark_as_advanced(DAOS_INCLUDE_DIR DAOS_FS_INCLUDE_DIR 
+                 DAOS_LIBRARY DAOS_FS_LIBRARY DAOS_UNS_LIBRARY)
index ca2db88ae291673cd88802d925f1796822184502..715e14e9e286fbbc24637d8840bcc2b8b7289f93 100644 (file)
@@ -3455,7 +3455,7 @@ options:
   type: str
   level: advanced
   desc: experimental Option to set backend store type
-  long_desc: defaults to rados. Other valid values are dbstore and motr (both experimental).
+  long_desc: defaults to rados. Other valid values are dbstore, motr, and daos (All experimental).
   default: rados
   services:
   - rgw
@@ -3463,6 +3463,7 @@ options:
   - rados
   - dbstore
   - motr
+  - daos
 - name: rgw_filter
   type: str
   level: advanced
@@ -3590,3 +3591,10 @@ options:
   - rgw
   see_also:
   - rgw_trust_forwarded_https
+- name: daos_pool
+  type: str
+  level: advanced
+  desc: DAOS Pool to use
+  default: tank
+  services:
+  - rgw
index 421ce82456325613378879330fa5cfec27321f3f..cc9ad0ec7f7bd8ba4c862a9286812e13c28b2dff 100644 (file)
 /* Backend CORTX-Motr for Rados Gateway */
 #cmakedefine WITH_RADOSGW_MOTR
 
+/* Backend CORTX-DAOS for Rados Gateway */
+#cmakedefine WITH_RADOSGW_DAOS
+
 /* Defined if std::map::merge() is supported */
 #cmakedefine HAVE_STDLIB_MAP_SPLICING
 
index 01a4e6c3fe654451362160a1cca39d01337dd52a..a9f4f6637ab13865cbb7b6a8ab9ff90c69c61402 100644 (file)
@@ -184,6 +184,9 @@ endif()
 if(WITH_RADOSGW_MOTR)
   list(APPEND librgw_common_srcs rgw_sal_motr.cc)
 endif()
+if(WITH_RADOSGW_DAOS)
+  list(APPEND librgw_common_srcs rgw_sal_daos.cc)
+endif()
 if(WITH_JAEGER)
   list(APPEND librgw_common_srcs rgw_tracer.cc)
 endif()
@@ -272,6 +275,14 @@ if(WITH_RADOSGW_MOTR)
   target_link_libraries(rgw_common PRIVATE motr::motr)
 endif()
 
+if(WITH_RADOSGW_DAOS)
+  find_package(DAOS REQUIRED)
+  set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DDEBUG ")
+  target_link_libraries(rgw_common PRIVATE daos dfs ds3 uuid duns)
+  target_include_directories(rgw_common PRIVATE ${PC_DAOS_INCLUDEDIR} )
+  link_directories( ${PC_DAOS_LIBRARY_DIRS} )
+endif()
+
 set(rgw_a_srcs
   rgw_auth_keystone.cc
   rgw_client_io.cc
index c95707741ed5128c7fcc6ac0c209e43a7f6a7443..43d4a043ae57df9d5fdbf2aba3c41d8b7760bb69 100644 (file)
@@ -3048,7 +3048,8 @@ rgw_global_init(const std::map<std::string,std::string> *defaults,
   const auto& config_store = g_conf().get_val<std::string>("rgw_backend_store");
 
   if ((config_store == "dbstore") ||
-      (config_store == "motr")) {
+      (config_store == "motr") || 
+      (config_store == "daos")) {
     // These stores don't use the mon
     flags |= CINIT_FLAG_NO_MON_CONFIG;
   }
index 02c081847237670dbb0f97c9666b4aea17d2e542..1798775dcc0097c901ab9073fb09c92110183db9 100644 (file)
@@ -33,6 +33,9 @@
 #include "rgw_sal_motr.h"
 #endif
 
+#ifdef WITH_RADOSGW_DAOS
+#include "rgw_sal_daos.h"
+#endif
 
 #define dout_subsys ceph_subsys_rgw
 
@@ -44,7 +47,11 @@ extern rgw::sal::Store* newDBStore(CephContext *cct);
 #ifdef WITH_RADOSGW_MOTR
 extern rgw::sal::Store* newMotrStore(CephContext *cct);
 #endif
+#ifdef WITH_RADOSGW_DAOS
+extern rgw::sal::Store* newDaosStore(CephContext *cct);
+#endif
 extern rgw::sal::Store* newBaseFilter(rgw::sal::Store* next);
+
 }
 
 RGWObjState::RGWObjState() {
@@ -211,6 +218,22 @@ rgw::sal::Store* StoreManager::init_storage_provider(const DoutPrefixProvider* d
   }
 #endif
 
+#ifdef WITH_RADOSGW_DAOS
+  else if (cfg.store_name.compare("daos") == 0) {
+    store = newDaosStore(cct);
+    if (store == nullptr) {
+      ldpp_dout(dpp, 0) << "newDaosStore() failed!" << dendl;
+      return store;
+    }
+    int ret = store->initialize(cct, dpp);
+    if (ret != 0) {
+      ldpp_dout(dpp, 20) << "ERROR: store->initialize() failed: " << ret << dendl;
+      delete store;
+      return nullptr;
+    }
+  }
+#endif
+
   if (cfg.filter_name.compare("base") == 0) {
     rgw::sal::Store* next = store;
     store = newBaseFilter(next);
@@ -265,6 +288,17 @@ rgw::sal::Store* StoreManager::init_raw_storage_provider(const DoutPrefixProvide
     store = newMotrStore(cct);
 #else
     store = nullptr;
+#endif
+  } else if (cfg.store_name.compare("daos") == 0) {
+#ifdef WITH_RADOSGW_DAOS
+    store = newDaosStore(cct);
+
+    if (store->initialize(cct, dpp) < 0) {
+      delete store;
+      return nullptr;
+    }
+#else
+    store = nullptr;
 #endif
   }
 
@@ -324,6 +358,11 @@ StoreManager::Config StoreManager::get_config(bool admin, CephContext* cct)
     cfg.store_name = "motr";
   }
 #endif
+#ifdef WITH_RADOSGW_DAOS
+  else if (config_store == "daos") {
+    cfg.store_name = "daos";
+  }
+#endif
 
   // Get the filter
   cfg.filter_name = "none";
diff --git a/src/rgw/rgw_sal_daos.cc b/src/rgw/rgw_sal_daos.cc
new file mode 100644 (file)
index 0000000..9ccb509
--- /dev/null
@@ -0,0 +1,2473 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=2 sw=2 expandtab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * SAL implementation for the CORTX DAOS backend
+ *
+ * Copyright (C) 2022 Seagate Technology LLC and/or its Affiliates
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "rgw_sal_daos.h"
+
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <filesystem>
+#include <system_error>
+
+#include "common/Clock.h"
+#include "common/errno.h"
+#include "rgw_bucket.h"
+#include "rgw_compression.h"
+#include "rgw_sal.h"
+
+#define dout_subsys ceph_subsys_rgw
+
+using std::list;
+using std::map;
+using std::set;
+using std::string;
+using std::vector;
+
+namespace fs = std::filesystem;
+
+namespace rgw::sal {
+
+using ::ceph::decode;
+using ::ceph::encode;
+
+int DaosUser::list_buckets(const DoutPrefixProvider* dpp, const string& marker,
+                           const string& end_marker, uint64_t max,
+                           bool need_stats, BucketList& buckets,
+                           optional_yield y) {
+  ldpp_dout(dpp, 20) << "DEBUG: list_user_buckets: marker=" << marker
+                     << " end_marker=" << end_marker << " max=" << max << dendl;
+  int ret = 0;
+  bool is_truncated = false;
+  buckets.clear();
+  vector<struct ds3_bucket_info> bucket_infos(max);
+  daos_size_t bcount = bucket_infos.size();
+  vector<vector<uint8_t>> values(bcount, vector<uint8_t>(DS3_MAX_ENCODED_LEN));
+  for (daos_size_t i = 0; i < bcount; i++) {
+    bucket_infos[i].encoded = values[i].data();
+    bucket_infos[i].encoded_length = values[i].size();
+  }
+
+  char daos_marker[DS3_MAX_BUCKET_NAME];
+  std::strncpy(daos_marker, marker.c_str(), sizeof(daos_marker));
+  ret = ds3_bucket_list(&bcount, bucket_infos.data(), daos_marker,
+                        &is_truncated, store->ds3, nullptr);
+  ldpp_dout(dpp, 20) << "DEBUG: ds3_bucket_list: bcount=" << bcount
+                     << " ret=" << ret << dendl;
+  if (ret != 0) {
+    ldpp_dout(dpp, 0) << "ERROR: ds3_bucket_list failed!" << ret << dendl;
+    return ret;
+  }
+
+  bucket_infos.resize(bcount);
+  values.resize(bcount);
+
+  for (const auto& bi : bucket_infos) {
+    DaosBucketInfo dbinfo;
+    bufferlist bl;
+    bl.append(reinterpret_cast<char*>(bi.encoded), bi.encoded_length);
+    auto iter = bl.cbegin();
+    dbinfo.decode(iter);
+    buckets.add(std::make_unique<DaosBucket>(this->store, dbinfo.info, this));
+  }
+
+  buckets.set_truncated(is_truncated);
+  return 0;
+}
+
+int DaosUser::create_bucket(
+    const DoutPrefixProvider* dpp, const rgw_bucket& b,
+    const std::string& zonegroup_id, rgw_placement_rule& placement_rule,
+    std::string& swift_ver_location, const RGWQuotaInfo* pquota_info,
+    const RGWAccessControlPolicy& policy, Attrs& attrs, RGWBucketInfo& info,
+    obj_version& ep_objv, bool exclusive, bool obj_lock_enabled, bool* existed,
+    req_info& req_info, std::unique_ptr<Bucket>* bucket_out, optional_yield y) {
+  ldpp_dout(dpp, 20) << "DEBUG: create_bucket:" << b.name << dendl;
+  int ret;
+  std::unique_ptr<Bucket> bucket;
+
+  // Look up the bucket. Create it if it doesn't exist.
+  ret = this->store->get_bucket(dpp, this, b, &bucket, y);
+  if (ret != 0 && ret != -ENOENT) {
+    return ret;
+  }
+
+  if (ret != -ENOENT) {
+    *existed = true;
+    if (swift_ver_location.empty()) {
+      swift_ver_location = bucket->get_info().swift_ver_location;
+    }
+    placement_rule.inherit_from(bucket->get_info().placement_rule);
+
+    // TODO: ACL policy
+    // // don't allow changes to the acl policy
+    // RGWAccessControlPolicy old_policy(ctx());
+    // int rc = rgw_op_get_bucket_policy_from_attr(
+    //           dpp, this, u, bucket->get_attrs(), &old_policy, y);
+    // if (rc >= 0 && old_policy != policy) {
+    //    bucket_out->swap(bucket);
+    //    return -EEXIST;
+    //}
+  } else {
+    placement_rule.name = "default";
+    placement_rule.storage_class = "STANDARD";
+    bucket = std::make_unique<DaosBucket>(store, b, this);
+    bucket->set_attrs(attrs);
+
+    *existed = false;
+  }
+
+  // TODO: how to handle zone and multi-site.
+
+  if (!*existed) {
+    info.placement_rule = placement_rule;
+    info.bucket = b;
+    info.owner = this->get_info().user_id;
+    info.zonegroup = zonegroup_id;
+    info.creation_time = ceph::real_clock::now();
+    if (obj_lock_enabled)
+      info.flags = BUCKET_VERSIONED | BUCKET_OBJ_LOCK_ENABLED;
+    bucket->set_version(ep_objv);
+    bucket->get_info() = info;
+
+    // Create a new bucket:
+    DaosBucket* daos_bucket = static_cast<DaosBucket*>(bucket.get());
+    bufferlist bl;
+    std::unique_ptr<struct ds3_bucket_info> bucket_info =
+        daos_bucket->get_encoded_info(bl, ceph::real_time());
+    ret = ds3_bucket_create(bucket->get_name().c_str(), bucket_info.get(),
+                            nullptr, store->ds3, nullptr);
+    if (ret != 0) {
+      ldpp_dout(dpp, 0) << "ERROR: ds3_bucket_create failed! ret=" << ret
+                        << dendl;
+      return ret;
+    }
+  } else {
+    bucket->set_version(ep_objv);
+    bucket->get_info() = info;
+  }
+
+  bucket_out->swap(bucket);
+
+  return ret;
+}
+
+int DaosUser::read_attrs(const DoutPrefixProvider* dpp, optional_yield y) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosUser::read_stats(const DoutPrefixProvider* dpp, optional_yield y,
+                         RGWStorageStats* stats,
+                         ceph::real_time* last_stats_sync,
+                         ceph::real_time* last_stats_update) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+/* stats - Not for first pass */
+int DaosUser::read_stats_async(const DoutPrefixProvider* dpp,
+                               RGWGetUserStats_CB* cb) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosUser::complete_flush_stats(const DoutPrefixProvider* dpp,
+                                   optional_yield y) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosUser::read_usage(const DoutPrefixProvider* dpp, uint64_t start_epoch,
+                         uint64_t end_epoch, uint32_t max_entries,
+                         bool* is_truncated, RGWUsageIter& usage_iter,
+                         map<rgw_user_bucket, rgw_usage_log_entry>& usage) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosUser::trim_usage(const DoutPrefixProvider* dpp, uint64_t start_epoch,
+                         uint64_t end_epoch) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosUser::load_user(const DoutPrefixProvider* dpp, optional_yield y) {
+  const string name = info.user_id.to_str();
+  ldpp_dout(dpp, 20) << "DEBUG: load_user, name=" << name << dendl;
+
+  DaosUserInfo duinfo;
+  int ret = read_user(dpp, name, &duinfo);
+  if (ret != 0) {
+    ldpp_dout(dpp, 0) << "ERROR: load_user failed, name=" << name << dendl;
+    return ret;
+  }
+
+  info = duinfo.info;
+  attrs = duinfo.attrs;
+  objv_tracker.read_version = duinfo.user_version;
+  return 0;
+}
+
+int DaosUser::merge_and_store_attrs(const DoutPrefixProvider* dpp,
+                                    Attrs& new_attrs, optional_yield y) {
+  ldpp_dout(dpp, 20) << "DEBUG: merge_and_store_attrs, new_attrs=" << new_attrs
+                     << dendl;
+  for (auto& it : new_attrs) {
+    attrs[it.first] = it.second;
+  }
+  return store_user(dpp, y, false);
+}
+
+int DaosUser::store_user(const DoutPrefixProvider* dpp, optional_yield y,
+                         bool exclusive, RGWUserInfo* old_info) {
+  const string name = info.user_id.to_str();
+  ldpp_dout(dpp, 10) << "DEBUG: Store_user(): User name=" << name << dendl;
+
+  // Read user
+  int ret = 0;
+  struct DaosUserInfo duinfo;
+  ret = read_user(dpp, name, &duinfo);
+  obj_version obj_ver = duinfo.user_version;
+  std::unique_ptr<struct ds3_user_info> old_user_info;
+  std::vector<const char*> old_access_ids;
+
+  // Check if the user already exists
+  if (ret == 0 && obj_ver.ver) {
+    // already exists.
+
+    if (old_info) {
+      *old_info = duinfo.info;
+    }
+
+    if (objv_tracker.read_version.ver != obj_ver.ver) {
+      // Object version mismatch.. return ECANCELED
+      ret = -ECANCELED;
+      ldpp_dout(dpp, 0) << "User Read version mismatch read_version="
+                        << objv_tracker.read_version.ver
+                        << " obj_ver=" << obj_ver.ver << dendl;
+      return ret;
+    }
+
+    if (exclusive) {
+      // return
+      return ret;
+    }
+    obj_ver.ver++;
+
+    for (auto const& [id, key] : duinfo.info.access_keys) {
+      old_access_ids.push_back(id.c_str());
+    }
+    old_user_info.reset(
+        new ds3_user_info{.name = duinfo.info.user_id.to_str().c_str(),
+                          .email = duinfo.info.user_email.c_str(),
+                          .access_ids = old_access_ids.data(),
+                          .access_ids_nr = old_access_ids.size()});
+  } else {
+    obj_ver.ver = 1;
+    obj_ver.tag = "UserTAG";
+  }
+
+  bufferlist bl;
+  std::unique_ptr<struct ds3_user_info> user_info =
+      get_encoded_info(bl, obj_ver);
+
+  ret = ds3_user_set(name.c_str(), user_info.get(), old_user_info.get(),
+                     store->ds3, nullptr);
+
+  if (ret != 0) {
+    ldpp_dout(dpp, 0) << "Error: ds3_user_set failed, name=" << name
+                      << " ret=" << ret << dendl;
+  }
+
+  return ret;
+}
+
+int DaosUser::read_user(const DoutPrefixProvider* dpp, std::string name,
+                        DaosUserInfo* duinfo) {
+  // Initialize ds3_user_info
+  bufferlist bl;
+  uint64_t size = DS3_MAX_ENCODED_LEN;
+  struct ds3_user_info user_info = {.encoded = bl.append_hole(size).c_str(),
+                                    .encoded_length = size};
+
+  int ret = ds3_user_get(name.c_str(), &user_info, store->ds3, nullptr);
+
+  if (ret != 0) {
+    ldpp_dout(dpp, 0) << "Error: ds3_user_get failed, name=" << name
+                      << " ret=" << ret << dendl;
+    return ret;
+  }
+
+  // Decode
+  bufferlist& blr = bl;
+  auto iter = blr.cbegin();
+  duinfo->decode(iter);
+  return ret;
+}
+
+std::unique_ptr<struct ds3_user_info> DaosUser::get_encoded_info(
+    bufferlist& bl, obj_version& obj_ver) {
+  // Encode user data
+  struct DaosUserInfo duinfo;
+  duinfo.info = info;
+  duinfo.attrs = attrs;
+  duinfo.user_version = obj_ver;
+  duinfo.encode(bl);
+
+  // Initialize ds3_user_info
+  access_ids.clear();
+  for (auto const& [id, key] : info.access_keys) {
+    access_ids.push_back(id.c_str());
+  }
+  return std::unique_ptr<struct ds3_user_info>(
+      new ds3_user_info{.name = info.user_id.to_str().c_str(),
+                        .email = info.user_email.c_str(),
+                        .access_ids = access_ids.data(),
+                        .access_ids_nr = access_ids.size(),
+                        .encoded = bl.c_str(),
+                        .encoded_length = bl.length()});
+}
+
+int DaosUser::remove_user(const DoutPrefixProvider* dpp, optional_yield y) {
+  const string name = info.user_id.to_str();
+
+  // TODO: the expectation is that the object version needs to be passed in as a
+  // method arg see int DB::remove_user(const DoutPrefixProvider *dpp,
+  // RGWUserInfo& uinfo, RGWObjVersionTracker *pobjv)
+  obj_version obj_ver;
+  bufferlist bl;
+  std::unique_ptr<struct ds3_user_info> user_info =
+      get_encoded_info(bl, obj_ver);
+
+  // Remove user
+  int ret = ds3_user_remove(name.c_str(), user_info.get(), store->ds3, nullptr);
+  if (ret != 0) {
+    ldpp_dout(dpp, 0) << "Error: ds3_user_set failed, name=" << name
+                      << " ret=" << ret << dendl;
+  }
+  return ret;
+}
+
+DaosBucket::~DaosBucket() { close(nullptr); }
+
+int DaosBucket::open(const DoutPrefixProvider* dpp) {
+  ldpp_dout(dpp, 20) << "DEBUG: open, name=" << info.bucket.name.c_str()
+                     << dendl;
+  // Idempotent
+  if (is_open()) {
+    return 0;
+  }
+
+  int ret = ds3_bucket_open(get_name().c_str(), &ds3b, store->ds3, nullptr);
+  ldpp_dout(dpp, 20) << "DEBUG: ds3_bucket_open, name=" << get_name()
+                     << ", ret=" << ret << dendl;
+
+  return ret;
+}
+
+int DaosBucket::close(const DoutPrefixProvider* dpp) {
+  ldpp_dout(dpp, 20) << "DEBUG: close" << dendl;
+  // Idempotent
+  if (!is_open()) {
+    return 0;
+  }
+
+  int ret = ds3_bucket_close(ds3b, nullptr);
+  ds3b = nullptr;
+  ldpp_dout(dpp, 20) << "DEBUG: ds3_bucket_close ret=" << ret << dendl;
+
+  return ret;
+}
+
+std::unique_ptr<struct ds3_bucket_info> DaosBucket::get_encoded_info(
+    bufferlist& bl, ceph::real_time _mtime) {
+  DaosBucketInfo dbinfo;
+  dbinfo.info = info;
+  dbinfo.bucket_attrs = attrs;
+  dbinfo.mtime = _mtime;
+  dbinfo.bucket_version = bucket_version;
+  dbinfo.encode(bl);
+
+  auto bucket_info = std::make_unique<struct ds3_bucket_info>();
+  bucket_info->encoded = bl.c_str();
+  bucket_info->encoded_length = bl.length();
+  std::strncpy(bucket_info->name, get_name().c_str(), sizeof(bucket_info->name));
+  return bucket_info;
+}
+
+int DaosBucket::remove_bucket(const DoutPrefixProvider* dpp,
+                              bool delete_children, bool forward_to_master,
+                              req_info* req_info, optional_yield y) {
+  ldpp_dout(dpp, 20) << "DEBUG: remove_bucket, delete_children="
+                    
+                     << delete_children
+                    
+                     << " forward_to_master=" << forward_to_master << dendl;
+
+  return ds3_bucket_destroy(get_name().c_str(), delete_children, store->ds3,
+                            nullptr);
+}
+
+int DaosBucket::remove_bucket_bypass_gc(int concurrent_max,
+                                        bool keep_index_consistent,
+                                        optional_yield y,
+                                        const DoutPrefixProvider* dpp) {
+  ldpp_dout(dpp, 20) << "DEBUG: remove_bucket_bypass_gc, concurrent_max="
+                    
+                     << concurrent_max
+                    
+                     << " keep_index_consistent=" << keep_index_consistent
+                    
+                     << dendl;
+  return ds3_bucket_destroy(get_name().c_str(), true, store->ds3, nullptr);
+}
+
+int DaosBucket::put_info(const DoutPrefixProvider* dpp, bool exclusive,
+                         ceph::real_time _mtime) {
+  ldpp_dout(dpp, 20) << "DEBUG: put_info(): bucket name=" << get_name()
+                     << dendl;
+
+  int ret = open(dpp);
+  if (ret != 0) {
+    return ret;
+  }
+
+  bufferlist bl;
+  std::unique_ptr<struct ds3_bucket_info> bucket_info =
+      get_encoded_info(bl, ceph::real_time());
+
+  ret = ds3_bucket_set_info(bucket_info.get(), ds3b, nullptr);
+  if (ret != 0) {
+    ldpp_dout(dpp, 0) << "ERROR: ds3_bucket_set_info failed: " << ret << dendl;
+  }
+  return ret;
+}
+
+int DaosBucket::load_bucket(const DoutPrefixProvider* dpp, optional_yield y,
+                            bool get_stats) {
+  ldpp_dout(dpp, 20) << "DEBUG: load_bucket(): bucket name=" << get_name()
+                     << dendl;
+  int ret = open(dpp);
+  if (ret != 0) {
+    return ret;
+  }
+
+  bufferlist bl;
+  DaosBucketInfo dbinfo;
+  uint64_t size = DS3_MAX_ENCODED_LEN;
+  struct ds3_bucket_info bucket_info = {.encoded = bl.append_hole(size).c_str(),
+                                        .encoded_length = size};
+
+  ret = ds3_bucket_get_info(&bucket_info, ds3b, nullptr);
+  if (ret != 0) {
+    ldpp_dout(dpp, 0) << "ERROR: ds3_bucket_get_info failed: " << ret << dendl;
+    return ret;
+  }
+
+  auto iter = bl.cbegin();
+  dbinfo.decode(iter);
+  info = dbinfo.info;
+  rgw_placement_rule placement_rule;
+  placement_rule.name = "default";
+  placement_rule.storage_class = "STANDARD";
+  info.placement_rule = placement_rule;
+
+  attrs = dbinfo.bucket_attrs;
+  mtime = dbinfo.mtime;
+  bucket_version = dbinfo.bucket_version;
+  return ret;
+}
+
+/* stats - Not for first pass */
+int DaosBucket::read_stats(const DoutPrefixProvider* dpp,
+                           const bucket_index_layout_generation& idx_layout,
+                           int shard_id, std::string* bucket_ver,
+                           std::string* master_ver,
+                           std::map<RGWObjCategory, RGWStorageStats>& stats,
+                           std::string* max_marker, bool* syncstopped) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosBucket::read_stats_async(
+    const DoutPrefixProvider* dpp,
+    const bucket_index_layout_generation& idx_layout, int shard_id,
+    RGWGetBucketStats_CB* ctx) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosBucket::sync_user_stats(const DoutPrefixProvider* dpp,
+                                optional_yield y) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosBucket::update_container_stats(const DoutPrefixProvider* dpp) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosBucket::check_bucket_shards(const DoutPrefixProvider* dpp) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosBucket::chown(const DoutPrefixProvider* dpp, User* new_user,
+                      User* old_user, optional_yield y,
+                      const std::string* marker) {
+  /* XXX: Update policies of all the bucket->objects with new user */
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+/* Make sure to call load_bucket() if you need it first */
+bool DaosBucket::is_owner(User* user) {
+  return (info.owner.compare(user->get_id()) == 0);
+}
+
+int DaosBucket::check_empty(const DoutPrefixProvider* dpp, optional_yield y) {
+  /* XXX: Check if bucket contains any objects */
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosBucket::check_quota(const DoutPrefixProvider* dpp, RGWQuota& quota,
+                            uint64_t obj_size, optional_yield y,
+                            bool check_size_only) {
+  /* Not Handled in the first pass as stats are also needed */
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosBucket::merge_and_store_attrs(const DoutPrefixProvider* dpp,
+                                      Attrs& new_attrs, optional_yield y) {
+  ldpp_dout(dpp, 20) << "DEBUG: merge_and_store_attrs, new_attrs=" << new_attrs
+                     << dendl;
+  for (auto& it : new_attrs) {
+    attrs[it.first] = it.second;
+  }
+
+  return put_info(dpp, y, ceph::real_time());
+}
+
+int DaosBucket::try_refresh_info(const DoutPrefixProvider* dpp,
+                                 ceph::real_time* pmtime) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+/* XXX: usage and stats not supported in the first pass */
+int DaosBucket::read_usage(const DoutPrefixProvider* dpp, uint64_t start_epoch,
+                           uint64_t end_epoch, uint32_t max_entries,
+                           bool* is_truncated, RGWUsageIter& usage_iter,
+                           map<rgw_user_bucket, rgw_usage_log_entry>& usage) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosBucket::trim_usage(const DoutPrefixProvider* dpp, uint64_t start_epoch,
+                           uint64_t end_epoch) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosBucket::remove_objs_from_index(
+    const DoutPrefixProvider* dpp,
+    std::list<rgw_obj_index_key>& objs_to_unlink) {
+  /* XXX: CHECK: Unlike RadosStore, there is no seperate bucket index table.
+   * Delete all the object in the list from the object table of this
+   * bucket
+   */
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosBucket::check_index(
+    const DoutPrefixProvider* dpp,
+    std::map<RGWObjCategory, RGWStorageStats>& existing_stats,
+    std::map<RGWObjCategory, RGWStorageStats>& calculated_stats) {
+  /* XXX: stats not supported yet */
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosBucket::rebuild_index(const DoutPrefixProvider* dpp) {
+  /* there is no index table in DAOS. Not applicable */
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosBucket::set_tag_timeout(const DoutPrefixProvider* dpp,
+                                uint64_t timeout) {
+  /* XXX: CHECK: set tag timeout for all the bucket objects? */
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosBucket::purge_instance(const DoutPrefixProvider* dpp) {
+  /* XXX: CHECK: for DAOS only single instance supported.
+   * Remove all the objects for that instance? Anything extra needed?
+   */
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosBucket::set_acl(const DoutPrefixProvider* dpp,
+                        RGWAccessControlPolicy& acl, optional_yield y) {
+  ldpp_dout(dpp, 20) << "DEBUG: set_acl" << dendl;
+  int ret = 0;
+  bufferlist aclbl;
+
+  acls = acl;
+  acl.encode(aclbl);
+
+  Attrs attrs = get_attrs();
+  attrs[RGW_ATTR_ACL] = aclbl;
+
+  return ret;
+}
+
+std::unique_ptr<Object> DaosBucket::get_object(const rgw_obj_key& k) {
+  return std::make_unique<DaosObject>(this->store, k, this);
+}
+
+bool compare_rgw_bucket_dir_entry(rgw_bucket_dir_entry& entry1,
+                                  rgw_bucket_dir_entry& entry2) {
+  return (entry1.key < entry2.key);
+}
+
+bool compare_multipart_upload(std::unique_ptr<MultipartUpload>& upload1,
+                              std::unique_ptr<MultipartUpload>& upload2) {
+  return (upload1->get_key() < upload2->get_key());
+}
+
+int DaosBucket::list(const DoutPrefixProvider* dpp, ListParams& params, int max,
+                     ListResults& results, optional_yield y) {
+  ldpp_dout(dpp, 20) << "DEBUG: list bucket=" << get_name() << " max=" << max
+                     << " params=" << params << dendl;
+  // End
+  if (max == 0) {
+    return 0;
+  }
+
+  int ret = open(dpp);
+  if (ret != 0) {
+    return ret;
+  }
+
+  // Init needed structures
+  vector<struct ds3_object_info> object_infos(max);
+  uint32_t nobj = object_infos.size();
+  vector<vector<uint8_t>> values(nobj, vector<uint8_t>(DS3_MAX_ENCODED_LEN));
+  for (uint32_t i = 0; i < nobj; i++) {
+    object_infos[i].encoded = values[i].data();
+    object_infos[i].encoded_length = values[i].size();
+  }
+
+  vector<struct ds3_common_prefix_info> common_prefixes(max);
+  uint32_t ncp = common_prefixes.size();
+
+  char daos_marker[DS3_MAX_KEY_BUFF];
+  std::strncpy(daos_marker, params.marker.to_str().c_str(), sizeof(daos_marker));
+
+  ret = ds3_bucket_list_obj(&nobj, object_infos.data(), &ncp,
+                            common_prefixes.data(), params.prefix.c_str(),
+                            params.delim.c_str(), daos_marker,
+                            params.list_versions, &results.is_truncated, ds3b);
+
+  if (ret != 0) {
+    ldpp_dout(dpp, 0) << "ERROR: ds3_bucket_list_obj failed, name="
+                      << get_name() << ", ret=" << ret << dendl;
+    return ret;
+  }
+
+  object_infos.resize(nobj);
+  values.resize(nobj);
+  common_prefixes.resize(ncp);
+
+  // Fill common prefixes
+  for (auto const& cp : common_prefixes) {
+    results.common_prefixes[cp.prefix] = true;
+  }
+
+  // Decode objs
+  for (auto const& obj : object_infos) {
+    bufferlist bl;
+    rgw_bucket_dir_entry ent;
+    bl.append(reinterpret_cast<char*>(obj.encoded), obj.encoded_length);
+    auto iter = bl.cbegin();
+    ent.decode(iter);
+    if (params.list_versions || ent.is_visible()) {
+      results.objs.emplace_back(std::move(ent));
+    }
+  }
+
+  if (!params.allow_unordered) {
+    std::sort(results.objs.begin(), results.objs.end(),
+              compare_rgw_bucket_dir_entry);
+  }
+
+  return ret;
+}
+
+int DaosBucket::list_multiparts(
+    const DoutPrefixProvider* dpp, const string& prefix, string& marker,
+    const string& delim, const int& max_uploads,
+    vector<std::unique_ptr<MultipartUpload>>& uploads,
+    map<string, bool>* common_prefixes, bool* is_truncated) {
+  ldpp_dout(dpp, 20) << "DEBUG: list_multiparts" << dendl;
+  // End of uploading
+  if (max_uploads == 0) {
+    *is_truncated = false;
+    return 0;
+  }
+
+  // Init needed structures
+  vector<struct ds3_multipart_upload_info> multipart_upload_infos(max_uploads);
+  uint32_t nmp = multipart_upload_infos.size();
+  vector<vector<uint8_t>> values(nmp, vector<uint8_t>(DS3_MAX_ENCODED_LEN));
+  for (uint32_t i = 0; i < nmp; i++) {
+    multipart_upload_infos[i].encoded = values[i].data();
+    multipart_upload_infos[i].encoded_length = values[i].size();
+  }
+
+  vector<struct ds3_common_prefix_info> cps(max_uploads);
+  uint32_t ncp = cps.size();
+
+  char daos_marker[DS3_MAX_KEY_BUFF];
+  std::strncpy(daos_marker, marker.c_str(), sizeof(daos_marker));
+
+  int ret = ds3_bucket_list_multipart(
+      get_name().c_str(), &nmp, multipart_upload_infos.data(), &ncp, cps.data(),
+      prefix.c_str(), delim.c_str(), daos_marker, is_truncated, store->ds3);
+
+  multipart_upload_infos.resize(nmp);
+  values.resize(nmp);
+  cps.resize(ncp);
+
+  // Fill common prefixes
+  for (auto const& cp : cps) {
+    (*common_prefixes)[cp.prefix] = true;
+  }
+
+  for (auto const& mp : multipart_upload_infos) {
+    // Decode the xattr
+    bufferlist bl;
+    rgw_bucket_dir_entry ent;
+    bl.append(reinterpret_cast<char*>(mp.encoded), mp.encoded_length);
+    auto iter = bl.cbegin();
+    ent.decode(iter);
+    string name = ent.key.name;
+
+    ACLOwner owner(rgw_user(ent.meta.owner));
+    owner.set_name(ent.meta.owner_display_name);
+    uploads.push_back(this->get_multipart_upload(
+        name, mp.upload_id, std::move(owner), ent.meta.mtime));
+  }
+
+  // Sort uploads
+  std::sort(uploads.begin(), uploads.end(), compare_multipart_upload);
+
+  return ret;
+}
+
+int DaosBucket::abort_multiparts(const DoutPrefixProvider* dpp,
+                                 CephContext* cct) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+void DaosStore::finalize(void) {
+  ldout(cctx, 20) << "DEBUG: finalize" << dendl;
+  int ret;
+
+  ret = ds3_disconnect(ds3, nullptr);
+  if (ret != 0) {
+    ldout(cctx, 0) << "ERROR: ds3_disconnect() failed: " << ret << dendl;
+  }
+  ds3 = nullptr;
+
+  ret = ds3_fini();
+  if (ret != 0) {
+    ldout(cctx, 0) << "ERROR: daos_fini() failed: " << ret << dendl;
+  }
+}
+
+int DaosStore::initialize(CephContext* cct, const DoutPrefixProvider* dpp) {
+  ldpp_dout(dpp, 20) << "DEBUG: initialize" << dendl;
+  int ret = ds3_init();
+
+  // DS3 init failed, allow the case where init is already done
+  if (ret != 0 && ret != DER_ALREADY) {
+    ldout(cct, 0) << "ERROR: ds3_init() failed: " << ret << dendl;
+    return ret;
+  }
+
+  // XXX: these params should be taken from config settings and
+  // cct somehow?
+  const auto& daos_pool = cct->_conf.get_val<std::string>("daos_pool");
+  ldout(cct, 20) << "INFO: daos pool: " << daos_pool << dendl;
+
+  ret = ds3_connect(daos_pool.c_str(), nullptr, &ds3, nullptr);
+
+  if (ret != 0) {
+    ldout(cct, 0) << "ERROR: ds3_connect() failed: " << ret << dendl;
+    ds3_fini();
+  }
+
+  return ret;
+}
+
+const std::string& DaosZoneGroup::get_endpoint() const {
+  if (!group.endpoints.empty()) {
+    return group.endpoints.front();
+  } else {
+    // use zonegroup's master zone endpoints
+    auto z = group.zones.find(group.master_zone);
+    if (z != group.zones.end() && !z->second.endpoints.empty()) {
+      return z->second.endpoints.front();
+    }
+  }
+  return empty;
+}
+
+bool DaosZoneGroup::placement_target_exists(std::string& target) const {
+  return !!group.placement_targets.count(target);
+}
+
+int DaosZoneGroup::get_placement_target_names(
+    std::set<std::string>& names) const {
+  for (const auto& target : group.placement_targets) {
+    names.emplace(target.second.name);
+  }
+
+  return 0;
+}
+
+int DaosZoneGroup::get_placement_tier(const rgw_placement_rule& rule,
+                                      std::unique_ptr<PlacementTier>* tier) {
+  std::map<std::string, RGWZoneGroupPlacementTarget>::const_iterator titer;
+  titer = group.placement_targets.find(rule.name);
+  if (titer == group.placement_targets.end()) {
+    return -ENOENT;
+  }
+
+  const auto& target_rule = titer->second;
+  std::map<std::string, RGWZoneGroupPlacementTier>::const_iterator ttier;
+  ttier = target_rule.tier_targets.find(rule.storage_class);
+  if (ttier == target_rule.tier_targets.end()) {
+    // not found
+    return -ENOENT;
+  }
+
+  PlacementTier* t;
+  t = new DaosPlacementTier(store, ttier->second);
+  if (!t) return -ENOMEM;
+
+  tier->reset(t);
+  return 0;
+}
+
+ZoneGroup& DaosZone::get_zonegroup() { return zonegroup; }
+
+int DaosZone::get_zonegroup(const std::string& id,
+                            std::unique_ptr<ZoneGroup>* group) {
+  /* XXX: for now only one zonegroup supported */
+  ZoneGroup* zg;
+  zg = new DaosZoneGroup(store, zonegroup.get_group());
+
+  group->reset(zg);
+  return 0;
+}
+
+const rgw_zone_id& DaosZone::get_id() { return cur_zone_id; }
+
+const std::string& DaosZone::get_name() const {
+  return zone_params->get_name();
+}
+
+bool DaosZone::is_writeable() { return true; }
+
+bool DaosZone::get_redirect_endpoint(std::string* endpoint) { return false; }
+
+bool DaosZone::has_zonegroup_api(const std::string& api) const { return false; }
+
+const std::string& DaosZone::get_current_period_id() {
+  return current_period->get_id();
+}
+
+std::unique_ptr<LuaManager> DaosStore::get_lua_manager() {
+  return std::make_unique<DaosLuaManager>(this);
+}
+
+int DaosObject::get_obj_state(const DoutPrefixProvider* dpp,
+                              RGWObjState** _state, optional_yield y,
+                              bool follow_olh) {
+  // Get object's metadata (those stored in rgw_bucket_dir_entry)
+  ldpp_dout(dpp, 20) << "DEBUG: get_obj_state" << dendl;
+  rgw_bucket_dir_entry ent;
+  *_state = &state;  // state is required even if a failure occurs
+
+  int ret = get_dir_entry_attrs(dpp, &ent);
+  if (ret != 0) {
+    return ret;
+  }
+
+  // Set object state.
+  state.exists = true;
+  state.size = ent.meta.size;
+  state.accounted_size = ent.meta.size;
+  state.mtime = ent.meta.mtime;
+
+  state.has_attrs = true;
+  bufferlist etag_bl;
+  string& etag = ent.meta.etag;
+  ldpp_dout(dpp, 20) << __func__ << ": object's etag:  " << ent.meta.etag
+                     << dendl;
+  etag_bl.append(etag);
+  state.attrset[RGW_ATTR_ETAG] = etag_bl;
+  return 0;
+}
+
+DaosObject::~DaosObject() { close(nullptr); }
+
+int DaosObject::set_obj_attrs(const DoutPrefixProvider* dpp, Attrs* setattrs,
+                              Attrs* delattrs, optional_yield y) {
+  ldpp_dout(dpp, 20) << "DEBUG: DaosObject::set_obj_attrs()" << dendl;
+  // TODO handle target_obj
+  // Get object's metadata (those stored in rgw_bucket_dir_entry)
+  rgw_bucket_dir_entry ent;
+  int ret = get_dir_entry_attrs(dpp, &ent);
+  if (ret != 0) {
+    return ret;
+  }
+
+  // Update object metadata
+  Attrs updateattrs = setattrs == nullptr ? attrs : *setattrs;
+  if (delattrs) {
+    for (auto const& [attr, attrval] : *delattrs) {
+      updateattrs.erase(attr);
+    }
+  }
+
+  ret = set_dir_entry_attrs(dpp, &ent, &updateattrs);
+  return ret;
+}
+
+int DaosObject::get_obj_attrs(optional_yield y, const DoutPrefixProvider* dpp,
+                              rgw_obj* target_obj) {
+  ldpp_dout(dpp, 20) << "DEBUG: DaosObject::get_obj_attrs()" << dendl;
+  // TODO handle target_obj
+  // Get object's metadata (those stored in rgw_bucket_dir_entry)
+  rgw_bucket_dir_entry ent;
+  int ret = get_dir_entry_attrs(dpp, &ent, &attrs);
+  return ret;
+}
+
+int DaosObject::modify_obj_attrs(const char* attr_name, bufferlist& attr_val,
+                                 optional_yield y,
+                                 const DoutPrefixProvider* dpp) {
+  // Get object's metadata (those stored in rgw_bucket_dir_entry)
+  ldpp_dout(dpp, 20) << "DEBUG: modify_obj_attrs" << dendl;
+  rgw_bucket_dir_entry ent;
+  int ret = get_dir_entry_attrs(dpp, &ent, &attrs);
+  if (ret != 0) {
+    return ret;
+  }
+
+  // Update object attrs
+  set_atomic();
+  attrs[attr_name] = attr_val;
+
+  ret = set_dir_entry_attrs(dpp, &ent, &attrs);
+  return ret;
+}
+
+int DaosObject::delete_obj_attrs(const DoutPrefixProvider* dpp,
+                                 const char* attr_name, optional_yield y) {
+  ldpp_dout(dpp, 20) << "DEBUG: delete_obj_attrs" << dendl;
+  rgw_obj target = get_obj();
+  Attrs rmattr;
+  bufferlist bl;
+
+  rmattr[attr_name] = bl;
+  return set_obj_attrs(dpp, nullptr, &rmattr, y);
+}
+
+bool DaosObject::is_expired() {
+  auto iter = attrs.find(RGW_ATTR_DELETE_AT);
+  if (iter != attrs.end()) {
+    utime_t delete_at;
+    try {
+      auto bufit = iter->second.cbegin();
+      decode(delete_at, bufit);
+    } catch (buffer::error& err) {
+      ldout(store->ctx(), 0)
+          << "ERROR: " << __func__
+          << ": failed to decode " RGW_ATTR_DELETE_AT " attr" << dendl;
+      return false;
+    }
+
+    if (delete_at <= ceph_clock_now() && !delete_at.is_zero()) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// Taken from rgw_rados.cc
+void DaosObject::gen_rand_obj_instance_name() {
+  enum { OBJ_INSTANCE_LEN = 32 };
+  char buf[OBJ_INSTANCE_LEN + 1];
+
+  gen_rand_alphanumeric_no_underscore(store->ctx(), buf, OBJ_INSTANCE_LEN);
+  state.obj.key.set_instance(buf);
+}
+
+int DaosObject::omap_get_vals(const DoutPrefixProvider* dpp,
+                              const std::string& marker, uint64_t count,
+                              std::map<std::string, bufferlist>* m, bool* pmore,
+                              optional_yield y) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosObject::omap_get_all(const DoutPrefixProvider* dpp,
+                             std::map<std::string, bufferlist>* m,
+                             optional_yield y) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosObject::omap_get_vals_by_keys(const DoutPrefixProvider* dpp,
+                                      const std::string& oid,
+                                      const std::set<std::string>& keys,
+                                      Attrs* vals) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosObject::omap_set_val_by_key(const DoutPrefixProvider* dpp,
+                                    const std::string& key, bufferlist& val,
+                                    bool must_exist, optional_yield y) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+std::unique_ptr<MPSerializer> DaosObject::get_serializer(
+    const DoutPrefixProvider* dpp, const std::string& lock_name) {
+  return std::make_unique<MPDaosSerializer>(dpp, store, this, lock_name);
+}
+
+int DaosObject::transition(Bucket* bucket,
+                           const rgw_placement_rule& placement_rule,
+                           const real_time& mtime, uint64_t olh_epoch,
+                           const DoutPrefixProvider* dpp, optional_yield y) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosObject::transition_to_cloud(
+    Bucket* bucket, rgw::sal::PlacementTier* tier, rgw_bucket_dir_entry& o,
+    std::set<std::string>& cloud_targets, CephContext* cct, bool update_object,
+    const DoutPrefixProvider* dpp, optional_yield y) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+bool DaosObject::placement_rules_match(rgw_placement_rule& r1,
+                                       rgw_placement_rule& r2) {
+  /* XXX: support single default zone and zonegroup for now */
+  return true;
+}
+
+int DaosObject::dump_obj_layout(const DoutPrefixProvider* dpp, optional_yield y,
+                                Formatter* f) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+std::unique_ptr<Object::ReadOp> DaosObject::get_read_op() {
+  return std::make_unique<DaosObject::DaosReadOp>(this);
+}
+
+DaosObject::DaosReadOp::DaosReadOp(DaosObject* _source) : source(_source) {}
+
+int DaosObject::DaosReadOp::prepare(optional_yield y,
+                                    const DoutPrefixProvider* dpp) {
+  ldpp_dout(dpp, 20) << __func__
+                     << ": bucket=" << source->get_bucket()->get_name()
+                     << dendl;
+
+  if (source->get_bucket()->versioned() && !source->have_instance()) {
+    // If the bucket is versioned and no version is specified, get the latest
+    // version
+    source->set_instance(DS3_LATEST_INSTANCE);
+  }
+
+  rgw_bucket_dir_entry ent;
+  int ret = source->get_dir_entry_attrs(dpp, &ent);
+
+  // Set source object's attrs. The attrs is key/value map and is used
+  // in send_response_data() to set attributes, including etag.
+  bufferlist etag_bl;
+  string& etag = ent.meta.etag;
+  ldpp_dout(dpp, 20) << __func__ << ": object's etag: " << ent.meta.etag
+                     << dendl;
+  etag_bl.append(etag.c_str(), etag.size());
+  source->get_attrs().emplace(std::move(RGW_ATTR_ETAG), std::move(etag_bl));
+
+  source->set_key(ent.key);
+  source->set_obj_size(ent.meta.size);
+  ldpp_dout(dpp, 20) << __func__ << ": object's size: " << ent.meta.size
+                     << dendl;
+
+  return ret;
+}
+
+int DaosObject::DaosReadOp::read(int64_t off, int64_t end, bufferlist& bl,
+                                 optional_yield y,
+                                 const DoutPrefixProvider* dpp) {
+  ldpp_dout(dpp, 20) << __func__ << ": off=" << off << " end=" << end << dendl;
+  int ret = source->lookup(dpp);
+  if (ret != 0) {
+    return ret;
+  }
+
+  // Calculate size, end is inclusive
+  uint64_t size = end - off + 1;
+
+  // Read
+  ret = source->read(dpp, bl, off, size);
+  if (ret != 0) {
+    return ret;
+  }
+
+  return ret;
+}
+
+// RGWGetObj::execute() calls ReadOp::iterate() to read object from 'off' to
+// 'end'. The returned data is processed in 'cb' which is a chain of
+// post-processing filters such as decompression, de-encryption and sending back
+// data to client (RGWGetObj_CB::handle_dta which in turn calls
+// RGWGetObj::get_data_cb() to send data back.).
+//
+// POC implements a simple sync version of iterate() function in which it reads
+// a block of data each time and call 'cb' for post-processing.
+int DaosObject::DaosReadOp::iterate(const DoutPrefixProvider* dpp, int64_t off,
+                                    int64_t end, RGWGetDataCB* cb,
+                                    optional_yield y) {
+  ldpp_dout(dpp, 20) << __func__ << ": off=" << off << " end=" << end << dendl;
+  int ret = source->lookup(dpp);
+  if (ret != 0) {
+    return ret;
+  }
+
+  // Calculate size, end is inclusive
+  uint64_t size = end - off + 1;
+
+  // Reserve buffers and read
+  bufferlist bl;
+  ret = source->read(dpp, bl, off, size);
+  if (ret != 0) {
+    return ret;
+  }
+
+  // Call cb to process returned data.
+  ldpp_dout(dpp, 20) << __func__ << ": call cb to process data, actual=" << size
+                     << dendl;
+  cb->handle_data(bl, off, size);
+  return ret;
+}
+
+int DaosObject::DaosReadOp::get_attr(const DoutPrefixProvider* dpp,
+                                     const char* name, bufferlist& dest,
+                                     optional_yield y) {
+  Attrs attrs;
+  int ret = source->get_dir_entry_attrs(dpp, nullptr, &attrs);
+  if (!ret) {
+    return -ENODATA;
+  }
+
+  auto search = attrs.find(name);
+  if (search == attrs.end()) {
+    return -ENODATA;
+  }
+
+  dest = search->second;
+  return 0;
+}
+
+std::unique_ptr<Object::DeleteOp> DaosObject::get_delete_op() {
+  return std::make_unique<DaosObject::DaosDeleteOp>(this);
+}
+
+DaosObject::DaosDeleteOp::DaosDeleteOp(DaosObject* _source) : source(_source) {}
+
+// Implementation of DELETE OBJ also requires DaosObject::get_obj_state()
+// to retrieve and set object's state from object's metadata.
+//
+// TODO:
+// 1. The POC only deletes the Daos objects. It doesn't handle the
+// DeleteOp::params. Delete::delete_obj() in rgw_rados.cc shows how rados
+// backend process the params.
+// 2. Delete an object when its versioning is turned on.
+// 3. Handle empty directories
+// 4. Fail when file doesn't exist
+int DaosObject::DaosDeleteOp::delete_obj(const DoutPrefixProvider* dpp,
+                                         optional_yield y) {
+  ldpp_dout(dpp, 20) << "DaosDeleteOp::delete_obj "
+                     << source->get_key().to_str() << " from "
+                     << source->get_bucket()->get_name() << dendl;
+  if (source->get_instance() == "null") {
+    source->clear_instance();
+  }
+
+  // Open bucket
+  int ret = 0;
+  std::string key = source->get_key().to_str();
+  DaosBucket* daos_bucket = source->get_daos_bucket();
+  ret = daos_bucket->open(dpp);
+  if (ret != 0) {
+    return ret;
+  }
+
+  // Remove the daos object
+  ret = ds3_obj_destroy(key.c_str(), daos_bucket->ds3b);
+  ldpp_dout(dpp, 20) << "DEBUG: ds3_obj_destroy key=" << key << " ret=" << ret
+                     << dendl;
+
+  // result.delete_marker = parent_op.result.delete_marker;
+  // result.version_id = parent_op.result.version_id;
+
+  return ret;
+}
+
+int DaosObject::delete_object(const DoutPrefixProvider* dpp, optional_yield y,
+                              bool prevent_versioning) {
+  ldpp_dout(dpp, 20) << "DEBUG: delete_object" << dendl;
+  DaosObject::DaosDeleteOp del_op(this);
+  del_op.params.bucket_owner = bucket->get_info().owner;
+  del_op.params.versioning_status = bucket->get_info().versioning_status();
+
+  return del_op.delete_obj(dpp, y);
+}
+
+int DaosObject::delete_obj_aio(const DoutPrefixProvider* dpp,
+                               RGWObjState* astate, Completions* aio,
+                               bool keep_index_consistent, optional_yield y) {
+  /* XXX: Make it async */
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosObject::copy_object(
+    User* user, req_info* info, const rgw_zone_id& source_zone,
+    rgw::sal::Object* dest_object, rgw::sal::Bucket* dest_bucket,
+    rgw::sal::Bucket* src_bucket, const rgw_placement_rule& dest_placement,
+    ceph::real_time* src_mtime, ceph::real_time* mtime,
+    const ceph::real_time* mod_ptr, const ceph::real_time* unmod_ptr,
+    bool high_precision_time, const char* if_match, const char* if_nomatch,
+    AttrsMod attrs_mod, bool copy_if_newer, Attrs& attrs,
+    RGWObjCategory category, uint64_t olh_epoch,
+    boost::optional<ceph::real_time> delete_at, std::string* version_id,
+    std::string* tag, std::string* etag, void (*progress_cb)(off_t, void*),
+    void* progress_data, const DoutPrefixProvider* dpp, optional_yield y) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosObject::swift_versioning_restore(bool& restored,
+                                         const DoutPrefixProvider* dpp) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosObject::swift_versioning_copy(const DoutPrefixProvider* dpp,
+                                      optional_yield y) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosObject::lookup(const DoutPrefixProvider* dpp) {
+  ldpp_dout(dpp, 20) << "DEBUG: lookup" << dendl;
+  if (is_open()) {
+    return 0;
+  }
+
+  if (get_instance() == "null") {
+    clear_instance();
+  }
+
+  int ret = 0;
+  DaosBucket* daos_bucket = get_daos_bucket();
+  ret = daos_bucket->open(dpp);
+  if (ret != 0) {
+    return ret;
+  }
+
+  ret = ds3_obj_open(get_key().to_str().c_str(), &ds3o, daos_bucket->ds3b);
+
+  if (ret == -ENOENT) {
+    ldpp_dout(dpp, 20) << "DEBUG: daos object (" << get_bucket()->get_name()
+                       << ", " << get_key().to_str()
+                       << ") does not exist: ret=" << ret << dendl;
+  } else if (ret != 0) {
+    ldpp_dout(dpp, 0) << "ERROR: failed to open daos object ("
+                      << get_bucket()->get_name() << ", " << get_key().to_str()
+                      << "): ret=" << ret << dendl;
+  }
+  return ret;
+}
+
+int DaosObject::create(const DoutPrefixProvider* dpp) {
+  ldpp_dout(dpp, 20) << "DEBUG: create" << dendl;
+  if (is_open()) {
+    return 0;
+  }
+
+  if (get_instance() == "null") {
+    clear_instance();
+  }
+
+  int ret = 0;
+  DaosBucket* daos_bucket = get_daos_bucket();
+  ret = daos_bucket->open(dpp);
+  if (ret != 0) {
+    return ret;
+  }
+
+  ret = ds3_obj_create(get_key().to_str().c_str(), &ds3o, daos_bucket->ds3b);
+
+  if (ret != 0) {
+    ldpp_dout(dpp, 0) << "ERROR: failed to create daos object ("
+                      << get_bucket()->get_name() << ", " << get_key().to_str()
+                      << "): ret=" << ret << dendl;
+  }
+  return ret;
+}
+
+int DaosObject::close(const DoutPrefixProvider* dpp) {
+  ldpp_dout(dpp, 20) << "DEBUG: close" << dendl;
+  if (!is_open()) {
+    return 0;
+  }
+
+  int ret = ds3_obj_close(ds3o);
+  ds3o = nullptr;
+  ldpp_dout(dpp, 20) << "DEBUG: ds3_obj_close ret=" << ret << dendl;
+  return ret;
+}
+
+int DaosObject::write(const DoutPrefixProvider* dpp, bufferlist&& data,
+                      uint64_t offset) {
+  ldpp_dout(dpp, 20) << "DEBUG: write" << dendl;
+  uint64_t size = data.length();
+  int ret = ds3_obj_write(data.c_str(), offset, &size, get_daos_bucket()->ds3b,
+                          ds3o, nullptr);
+  if (ret != 0) {
+    ldpp_dout(dpp, 0) << "ERROR: failed to write into daos object ("
+                      << get_bucket()->get_name() << ", " << get_key().to_str()
+                      << "): ret=" << ret << dendl;
+  }
+  return ret;
+}
+
+int DaosObject::read(const DoutPrefixProvider* dpp, bufferlist& data,
+                     uint64_t offset, uint64_t& size) {
+  ldpp_dout(dpp, 20) << "DEBUG: read" << dendl;
+  int ret = ds3_obj_read(data.append_hole(size).c_str(), offset, &size,
+                         get_daos_bucket()->ds3b, ds3o, nullptr);
+  if (ret != 0) {
+    ldpp_dout(dpp, 0) << "ERROR: failed to read from daos object ("
+                      << get_bucket()->get_name() << ", " << get_key().to_str()
+                      << "): ret=" << ret << dendl;
+  }
+  return ret;
+}
+
+// Get the object's dirent and attrs
+int DaosObject::get_dir_entry_attrs(const DoutPrefixProvider* dpp,
+                                    rgw_bucket_dir_entry* ent,
+                                    Attrs* getattrs) {
+  ldpp_dout(dpp, 20) << "DEBUG: get_dir_entry_attrs" << dendl;
+  int ret = 0;
+  vector<uint8_t> value(DS3_MAX_ENCODED_LEN);
+  uint32_t size = value.size();
+
+  if (get_key().ns == RGW_OBJ_NS_MULTIPART) {
+    struct ds3_multipart_upload_info ui = {.encoded = value.data(),
+                                           .encoded_length = size};
+    ret = ds3_upload_get_info(&ui, bucket->get_name().c_str(),
+                              get_key().to_str().c_str(), store->ds3);
+  } else {
+    ret = lookup(dpp);
+    if (ret != 0) {
+      return ret;
+    }
+
+    auto object_info = std::make_unique<struct ds3_object_info>();
+    object_info->encoded = value.data();
+    object_info->encoded_length = size;
+    ret = ds3_obj_get_info(object_info.get(), get_daos_bucket()->ds3b, ds3o);
+    size = object_info->encoded_length;
+  }
+
+  if (ret != 0) {
+    ldpp_dout(dpp, 0) << "ERROR: failed to get info of daos object ("
+                      << get_bucket()->get_name() << ", " << get_key().to_str()
+                      << "): ret=" << ret << dendl;
+    return ret;
+  }
+
+  rgw_bucket_dir_entry dummy_ent;
+  if (!ent) {
+    // if ent is not passed, use a dummy ent
+    ent = &dummy_ent;
+  }
+
+  bufferlist bl;
+  bl.append(reinterpret_cast<char*>(value.data()), size);
+  auto iter = bl.cbegin();
+  ent->decode(iter);
+  if (getattrs) {
+    decode(*getattrs, iter);
+  }
+
+  return ret;
+}
+// Set the object's dirent and attrs
+int DaosObject::set_dir_entry_attrs(const DoutPrefixProvider* dpp,
+                                    rgw_bucket_dir_entry* ent,
+                                    Attrs* setattrs) {
+  ldpp_dout(dpp, 20) << "DEBUG: set_dir_entry_attrs" << dendl;
+  int ret = lookup(dpp);
+  if (ret != 0) {
+    return ret;
+  }
+
+  // Set defaults
+  if (!ent) {
+    // if ent is not passed, return an error
+    return -EINVAL;
+  }
+
+  if (!setattrs) {
+    // if setattrs is not passed, use object attrs
+    setattrs = &attrs;
+  }
+
+  bufferlist wbl;
+  ent->encode(wbl);
+  encode(*setattrs, wbl);
+
+  // Write rgw_bucket_dir_entry into object xattr
+  auto object_info = std::make_unique<struct ds3_object_info>();
+  object_info->encoded = wbl.c_str();
+  object_info->encoded_length = wbl.length();
+  ret = ds3_obj_set_info(object_info.get(), get_daos_bucket()->ds3b, ds3o);
+  if (ret != 0) {
+    ldpp_dout(dpp, 0) << "ERROR: failed to set info of daos object ("
+                      << get_bucket()->get_name() << ", " << get_key().to_str()
+                      << "): ret=" << ret << dendl;
+  }
+  return ret;
+}
+
+int DaosObject::mark_as_latest(const DoutPrefixProvider* dpp,
+                               ceph::real_time set_mtime) {
+  // TODO handle deletion
+  // TODO understand race conditions
+  ldpp_dout(dpp, 20) << "DEBUG: mark_as_latest" << dendl;
+
+  // Get latest version so far
+  std::unique_ptr<DaosObject> latest_object = std::make_unique<DaosObject>(
+      store, rgw_obj_key(get_name(), DS3_LATEST_INSTANCE), get_bucket());
+
+  ldpp_dout(dpp, 20) << __func__ << ": key=" << get_key().to_str()
+                     << " latest_object_key= "
+                     << latest_object->get_key().to_str() << dendl;
+
+  int ret = latest_object->lookup(dpp);
+  if (ret == 0) {
+    // Get metadata only if file exists
+    rgw_bucket_dir_entry latest_ent;
+    Attrs latest_attrs;
+    ret = latest_object->get_dir_entry_attrs(dpp, &latest_ent, &latest_attrs);
+    if (ret != 0) {
+      return ret;
+    }
+
+    // Update flags
+    latest_ent.flags = rgw_bucket_dir_entry::FLAG_VER;
+    latest_ent.meta.mtime = set_mtime;
+    ret = latest_object->set_dir_entry_attrs(dpp, &latest_ent, &latest_attrs);
+    if (ret != 0) {
+      return ret;
+    }
+  }
+
+  // Get or create the link [latest], make it link to the current latest
+  // version.
+  ret =
+      ds3_obj_mark_latest(get_key().to_str().c_str(), get_daos_bucket()->ds3b);
+  ldpp_dout(dpp, 20) << "DEBUG: ds3_obj_mark_latest ret=" << ret << dendl;
+  return ret;
+}
+
+DaosAtomicWriter::DaosAtomicWriter(
+    const DoutPrefixProvider* dpp, optional_yield y,
+    std::unique_ptr<rgw::sal::Object> _head_obj, DaosStore* _store,
+    const rgw_user& _owner, const rgw_placement_rule* _ptail_placement_rule,
+    uint64_t _olh_epoch, const std::string& _unique_tag)
+    : StoreWriter(dpp, y),
+      store(_store),
+      owner(_owner),
+      ptail_placement_rule(_ptail_placement_rule),
+      olh_epoch(_olh_epoch),
+      unique_tag(_unique_tag),
+      obj(_store, _head_obj->get_key(), _head_obj->get_bucket()) {}
+
+int DaosAtomicWriter::prepare(optional_yield y) {
+  ldpp_dout(dpp, 20) << "DEBUG: prepare" << dendl;
+  int ret = obj.create(dpp);
+  return ret;
+}
+
+// TODO: Handle concurrent writes, a unique object id is a possible solution, or
+// use DAOS transactions
+// XXX: Do we need to accumulate writes as motr does?
+int DaosAtomicWriter::process(bufferlist&& data, uint64_t offset) {
+  ldpp_dout(dpp, 20) << "DEBUG: process" << dendl;
+  if (data.length() == 0) {
+    return 0;
+  }
+
+  int ret = 0;
+  if (!obj.is_open()) {
+    ret = obj.lookup(dpp);
+    if (ret != 0) {
+      return ret;
+    }
+  }
+
+  // XXX: Combine multiple streams into one as motr does
+  uint64_t data_size = data.length();
+  ret = obj.write(dpp, std::move(data), offset);
+  if (ret == 0) {
+    total_data_size += data_size;
+  }
+  return ret;
+}
+
+int DaosAtomicWriter::complete(
+    size_t accounted_size, const std::string& etag, ceph::real_time* mtime,
+    ceph::real_time set_mtime, std::map<std::string, bufferlist>& attrs,
+    ceph::real_time delete_at, const char* if_match, const char* if_nomatch,
+    const std::string* user_data, rgw_zone_set* zones_trace, bool* canceled,
+    optional_yield y) {
+  ldpp_dout(dpp, 20) << "DEBUG: complete" << dendl;
+  bufferlist bl;
+  rgw_bucket_dir_entry ent;
+  int ret;
+
+  // Set rgw_bucet_dir_entry. Some of the members of this structure may not
+  // apply to daos.
+  //
+  // Checkout AtomicObjectProcessor::complete() in rgw_putobj_processor.cc
+  // and RGWRados::Object::Write::write_meta() in rgw_rados.cc for what and
+  // how to set the dir entry. Only set the basic ones for POC, no ACLs and
+  // other attrs.
+  obj.get_key().get_index_key(&ent.key);
+  ent.meta.size = total_data_size;
+  ent.meta.accounted_size = accounted_size;
+  ent.meta.mtime =
+      real_clock::is_zero(set_mtime) ? ceph::real_clock::now() : set_mtime;
+  ent.meta.etag = etag;
+  ent.meta.owner = owner.to_str();
+  ent.meta.owner_display_name =
+      obj.get_bucket()->get_owner()->get_display_name();
+  bool is_versioned = obj.get_bucket()->versioned();
+  if (is_versioned)
+    ent.flags =
+        rgw_bucket_dir_entry::FLAG_VER | rgw_bucket_dir_entry::FLAG_CURRENT;
+  ldpp_dout(dpp, 20) << __func__ << ": key=" << obj.get_key().to_str()
+                     << " etag: " << etag << dendl;
+  if (user_data) ent.meta.user_data = *user_data;
+
+  RGWBucketInfo& info = obj.get_bucket()->get_info();
+  if (info.obj_lock_enabled() && info.obj_lock.has_rule()) {
+    auto iter = attrs.find(RGW_ATTR_OBJECT_RETENTION);
+    if (iter == attrs.end()) {
+      real_time lock_until_date =
+          info.obj_lock.get_lock_until_date(ent.meta.mtime);
+      string mode = info.obj_lock.get_mode();
+      RGWObjectRetention obj_retention(mode, lock_until_date);
+      bufferlist retention_bl;
+      obj_retention.encode(retention_bl);
+      attrs[RGW_ATTR_OBJECT_RETENTION] = retention_bl;
+    }
+  }
+
+  ret = obj.set_dir_entry_attrs(dpp, &ent, &attrs);
+
+  if (is_versioned) {
+    ret = obj.mark_as_latest(dpp, set_mtime);
+    if (ret != 0) {
+      return ret;
+    }
+  }
+
+  return ret;
+}
+
+int DaosMultipartUpload::abort(const DoutPrefixProvider* dpp,
+                               CephContext* cct) {
+  // Remove upload from bucket multipart index
+  ldpp_dout(dpp, 20) << "DEBUG: abort" << dendl;
+  return ds3_upload_remove(bucket->get_name().c_str(), get_upload_id().c_str(),
+                           store->ds3);
+}
+
+std::unique_ptr<rgw::sal::Object> DaosMultipartUpload::get_meta_obj() {
+  return bucket->get_object(
+      rgw_obj_key(get_upload_id(), string(), RGW_OBJ_NS_MULTIPART));
+}
+
+int DaosMultipartUpload::init(const DoutPrefixProvider* dpp, optional_yield y,
+                              ACLOwner& _owner,
+                              rgw_placement_rule& dest_placement,
+                              rgw::sal::Attrs& attrs) {
+  ldpp_dout(dpp, 20) << "DEBUG: init" << dendl;
+  int ret;
+  std::string oid = mp_obj.get_key();
+
+  // Create an initial entry in the bucket. The entry will be
+  // updated when multipart upload is completed, for example,
+  // size, etag etc.
+  bufferlist bl;
+  rgw_bucket_dir_entry ent;
+  ent.key.name = oid;
+  ent.meta.owner = owner.get_id().to_str();
+  ent.meta.category = RGWObjCategory::MultiMeta;
+  ent.meta.mtime = ceph::real_clock::now();
+
+  multipart_upload_info upload_info;
+  upload_info.dest_placement = dest_placement;
+
+  ent.encode(bl);
+  encode(attrs, bl);
+  encode(upload_info, bl);
+
+  struct ds3_multipart_upload_info ui;
+  std::strcpy(ui.upload_id, MULTIPART_UPLOAD_ID_PREFIX);
+  std::strncpy(ui.key, oid.c_str(), sizeof(ui.key));
+  ui.encoded = bl.c_str();
+  ui.encoded_length = bl.length();
+  int prefix_length = strlen(ui.upload_id);
+
+  do {
+    gen_rand_alphanumeric(store->ctx(), ui.upload_id + prefix_length,
+                          sizeof(ui.upload_id) - 1 - prefix_length);
+    mp_obj.init(oid, ui.upload_id);
+    ret = ds3_upload_init(&ui, bucket->get_name().c_str(), store->ds3);
+  } while (ret == -EEXIST);
+
+  if (ret != 0) {
+    ldpp_dout(dpp, 0) << "ERROR: failed to create multipart upload dir ("
+                      << bucket->get_name() << "/" << get_upload_id()
+                      << "): ret=" << ret << dendl;
+  }
+  return ret;
+}
+
+int DaosMultipartUpload::list_parts(const DoutPrefixProvider* dpp,
+                                    CephContext* cct, int num_parts, int marker,
+                                    int* next_marker, bool* truncated,
+                                    bool assume_unsorted) {
+  ldpp_dout(dpp, 20) << "DEBUG: list_parts" << dendl;
+  // Init needed structures
+  vector<struct ds3_multipart_part_info> multipart_part_infos(num_parts);
+  uint32_t npart = multipart_part_infos.size();
+  vector<vector<uint8_t>> values(npart, vector<uint8_t>(DS3_MAX_ENCODED_LEN));
+  for (uint32_t i = 0; i < npart; i++) {
+    multipart_part_infos[i].encoded = values[i].data();
+    multipart_part_infos[i].encoded_length = values[i].size();
+  }
+
+  uint32_t daos_marker = marker;
+  int ret = ds3_upload_list_parts(
+      bucket->get_name().c_str(), get_upload_id().c_str(), &npart,
+      multipart_part_infos.data(), &daos_marker, truncated, store->ds3);
+
+  if (ret != 0) {
+    if (ret == -ENOENT) {
+      ret = -ERR_NO_SUCH_UPLOAD;
+    }
+    return ret;
+  }
+
+  multipart_part_infos.resize(npart);
+  values.resize(npart);
+  parts.clear();
+
+  for (auto const& pi : multipart_part_infos) {
+    bufferlist bl;
+    bl.append(reinterpret_cast<char*>(pi.encoded), pi.encoded_length);
+
+    std::unique_ptr<DaosMultipartPart> part =
+        std::make_unique<DaosMultipartPart>();
+    auto iter = bl.cbegin();
+    decode(part->info, iter);
+    parts[pi.part_num] = std::move(part);
+  }
+
+  if (next_marker) {
+    *next_marker = daos_marker;
+  }
+  return ret;
+}
+
+// Heavily copied from rgw_sal_rados.cc
+int DaosMultipartUpload::complete(
+    const DoutPrefixProvider* dpp, optional_yield y, CephContext* cct,
+    map<int, string>& part_etags, list<rgw_obj_index_key>& remove_objs,
+    uint64_t& accounted_size, bool& compressed, RGWCompressionInfo& cs_info,
+    off_t& off, std::string& tag, ACLOwner& owner, uint64_t olh_epoch,
+    rgw::sal::Object* target_obj) {
+  ldpp_dout(dpp, 20) << "DEBUG: complete" << dendl;
+  char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE];
+  char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16];
+  std::string etag;
+  bufferlist etag_bl;
+  MD5 hash;
+  // Allow use of MD5 digest in FIPS mode for non-cryptographic purposes
+  hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
+  bool truncated;
+  int ret;
+
+  ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): enter" << dendl;
+  int total_parts = 0;
+  int handled_parts = 0;
+  int max_parts = 1000;
+  int marker = 0;
+  uint64_t min_part_size = cct->_conf->rgw_multipart_min_part_size;
+  auto etags_iter = part_etags.begin();
+  rgw::sal::Attrs attrs = target_obj->get_attrs();
+
+  do {
+    ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): list_parts()"
+                       << dendl;
+    ret = list_parts(dpp, cct, max_parts, marker, &marker, &truncated);
+    if (ret == -ENOENT) {
+      ret = -ERR_NO_SUCH_UPLOAD;
+    }
+    if (ret != 0) return ret;
+
+    total_parts += parts.size();
+    if (!truncated && total_parts != (int)part_etags.size()) {
+      ldpp_dout(dpp, 0) << "NOTICE: total parts mismatch: have: " << total_parts
+                        << " expected: " << part_etags.size() << dendl;
+      ret = -ERR_INVALID_PART;
+      return ret;
+    }
+    ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): parts.size()="
+                       << parts.size() << dendl;
+
+    for (auto obj_iter = parts.begin();
+         etags_iter != part_etags.end() && obj_iter != parts.end();
+         ++etags_iter, ++obj_iter, ++handled_parts) {
+      DaosMultipartPart* part =
+          dynamic_cast<rgw::sal::DaosMultipartPart*>(obj_iter->second.get());
+      uint64_t part_size = part->get_size();
+      ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): part_size="
+                         << part_size << dendl;
+      if (handled_parts < (int)part_etags.size() - 1 &&
+          part_size < min_part_size) {
+        ret = -ERR_TOO_SMALL;
+        return ret;
+      }
+
+      char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
+      if (etags_iter->first != (int)obj_iter->first) {
+        ldpp_dout(dpp, 0) << "NOTICE: parts num mismatch: next requested: "
+                          << etags_iter->first
+                          << " next uploaded: " << obj_iter->first << dendl;
+        ret = -ERR_INVALID_PART;
+        return ret;
+      }
+      string part_etag = rgw_string_unquote(etags_iter->second);
+      if (part_etag.compare(part->get_etag()) != 0) {
+        ldpp_dout(dpp, 0) << "NOTICE: etag mismatch: part: "
+                          << etags_iter->first
+                          << " etag: " << etags_iter->second << dendl;
+        ret = -ERR_INVALID_PART;
+        return ret;
+      }
+
+      hex_to_buf(part->get_etag().c_str(), petag, CEPH_CRYPTO_MD5_DIGESTSIZE);
+      hash.Update((const unsigned char*)petag, sizeof(petag));
+      ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): calc etag "
+                         << dendl;
+
+      RGWUploadPartInfo& obj_part = part->info;
+      string oid = mp_obj.get_part(obj_part.num);
+      rgw_obj src_obj;
+      src_obj.init_ns(bucket->get_key(), oid, RGW_OBJ_NS_MULTIPART);
+
+      bool part_compressed = (obj_part.cs_info.compression_type != "none");
+      if ((handled_parts > 0) &&
+          ((part_compressed != compressed) ||
+           (cs_info.compression_type != obj_part.cs_info.compression_type))) {
+        ldpp_dout(dpp, 0)
+            << "ERROR: compression type was changed during multipart upload ("
+            << cs_info.compression_type << ">>"
+            << obj_part.cs_info.compression_type << ")" << dendl;
+        ret = -ERR_INVALID_PART;
+        return ret;
+      }
+
+      ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): part compression"
+                         << dendl;
+      if (part_compressed) {
+        int64_t new_ofs;  // offset in compression data for new part
+        if (cs_info.blocks.size() > 0)
+          new_ofs = cs_info.blocks.back().new_ofs + cs_info.blocks.back().len;
+        else
+          new_ofs = 0;
+        for (const auto& block : obj_part.cs_info.blocks) {
+          compression_block cb;
+          cb.old_ofs = block.old_ofs + cs_info.orig_size;
+          cb.new_ofs = new_ofs;
+          cb.len = block.len;
+          cs_info.blocks.push_back(cb);
+          new_ofs = cb.new_ofs + cb.len;
+        }
+        if (!compressed)
+          cs_info.compression_type = obj_part.cs_info.compression_type;
+        cs_info.orig_size += obj_part.cs_info.orig_size;
+        compressed = true;
+      }
+
+      // We may not need to do the following as remove_objs are those
+      // don't show when listing a bucket. As we store in-progress uploaded
+      // object's metadata in a separate index, they are not shown when
+      // listing a bucket.
+      rgw_obj_index_key remove_key;
+      src_obj.key.get_index_key(&remove_key);
+
+      remove_objs.push_back(remove_key);
+
+      off += obj_part.size;
+      accounted_size += obj_part.accounted_size;
+      ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): off=" << off
+                         << ", accounted_size = " << accounted_size << dendl;
+    }
+  } while (truncated);
+  hash.Final((unsigned char*)final_etag);
+
+  buf_to_hex((unsigned char*)final_etag, sizeof(final_etag), final_etag_str);
+  snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2],
+           sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2, "-%lld",
+           (long long)part_etags.size());
+  etag = final_etag_str;
+  ldpp_dout(dpp, 10) << "calculated etag: " << etag << dendl;
+
+  etag_bl.append(etag);
+
+  attrs[RGW_ATTR_ETAG] = etag_bl;
+
+  if (compressed) {
+    // write compression attribute to full object
+    bufferlist tmp;
+    encode(cs_info, tmp);
+    attrs[RGW_ATTR_COMPRESSION] = tmp;
+  }
+
+  // Different from rgw_sal_rados.cc starts here
+  // Read the object's multipart info
+  bufferlist bl;
+  uint64_t size = DS3_MAX_ENCODED_LEN;
+  struct ds3_multipart_upload_info ui = {
+      .encoded = bl.append_hole(size).c_str(), .encoded_length = size};
+  ret = ds3_upload_get_info(&ui, bucket->get_name().c_str(),
+                            get_upload_id().c_str(), store->ds3);
+  ldpp_dout(dpp, 20) << "DEBUG: ds3_upload_get_info entry="
+                     << bucket->get_name() << "/" << get_upload_id() << dendl;
+  if (ret != 0) {
+    if (ret == -ENOENT) {
+      ret = -ERR_NO_SUCH_UPLOAD;
+    }
+    return ret;
+  }
+
+  rgw_bucket_dir_entry ent;
+  auto iter = bl.cbegin();
+  ent.decode(iter);
+
+  // Update entry data and name
+  target_obj->get_key().get_index_key(&ent.key);
+  ent.meta.size = off;
+  ent.meta.accounted_size = accounted_size;
+  ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): obj size="
+                     << ent.meta.size
+                     << " obj accounted size=" << ent.meta.accounted_size
+                     << dendl;
+  ent.meta.category = RGWObjCategory::Main;
+  ent.meta.mtime = ceph::real_clock::now();
+  bool is_versioned = target_obj->get_bucket()->versioned();
+  if (is_versioned)
+    ent.flags =
+        rgw_bucket_dir_entry::FLAG_VER | rgw_bucket_dir_entry::FLAG_CURRENT;
+  ent.meta.etag = etag;
+
+  // Open object
+  DaosObject* obj = static_cast<DaosObject*>(target_obj);
+  ret = obj->create(dpp);
+  if (ret != 0) {
+    return ret;
+  }
+
+  // Copy data from parts to object
+  uint64_t write_off = 0;
+  for (auto const& [part_num, part] : get_parts()) {
+    ds3_part_t* ds3p;
+    ret = ds3_part_open(get_bucket_name().c_str(), get_upload_id().c_str(),
+                        part_num, false, &ds3p, store->ds3);
+    if (ret != 0) {
+      return ret;
+    }
+
+    // Reserve buffers and read
+    uint64_t size = part->get_size();
+    bufferlist bl;
+    ret = ds3_part_read(bl.append_hole(size).c_str(), 0, &size, ds3p,
+                        store->ds3, nullptr);
+    if (ret != 0) {
+      ds3_part_close(ds3p);
+      return ret;
+    }
+
+    ldpp_dout(dpp, 20) << "DaosMultipartUpload::complete(): part " << part_num
+                       << " size is " << size << dendl;
+
+    // write to obj
+    obj->write(dpp, std::move(bl), write_off);
+    ds3_part_close(ds3p);
+    write_off += part->get_size();
+  }
+
+  // Set attributes
+  ret = obj->set_dir_entry_attrs(dpp, &ent, &attrs);
+
+  if (is_versioned) {
+    ret = obj->mark_as_latest(dpp, ent.meta.mtime);
+    if (ret != 0) {
+      return ret;
+    }
+  }
+
+  // Remove upload from bucket multipart index
+  ret = ds3_upload_remove(get_bucket_name().c_str(), get_upload_id().c_str(),
+                          store->ds3);
+  return ret;
+}
+
+int DaosMultipartUpload::get_info(const DoutPrefixProvider* dpp,
+                                  optional_yield y, rgw_placement_rule** rule,
+                                  rgw::sal::Attrs* attrs) {
+  ldpp_dout(dpp, 20) << "DaosMultipartUpload::get_info(): enter" << dendl;
+  if (!rule && !attrs) {
+    return 0;
+  }
+
+  if (rule) {
+    if (!placement.empty()) {
+      *rule = &placement;
+      if (!attrs) {
+        // Don't need attrs, done
+        return 0;
+      }
+    } else {
+      *rule = nullptr;
+    }
+  }
+
+  // Read the multipart upload dirent from index
+  bufferlist bl;
+  uint64_t size = DS3_MAX_ENCODED_LEN;
+  struct ds3_multipart_upload_info ui = {
+      .encoded = bl.append_hole(size).c_str(), .encoded_length = size};
+  int ret = ds3_upload_get_info(&ui, bucket->get_name().c_str(),
+                                get_upload_id().c_str(), store->ds3);
+
+  if (ret != 0) {
+    if (ret == -ENOENT) {
+      ret = -ERR_NO_SUCH_UPLOAD;
+    }
+    return ret;
+  }
+
+  multipart_upload_info upload_info;
+  rgw_bucket_dir_entry ent;
+  Attrs decoded_attrs;
+  auto iter = bl.cbegin();
+  ent.decode(iter);
+  decode(decoded_attrs, iter);
+  ldpp_dout(dpp, 20) << "DEBUG: decoded_attrs=" << attrs << dendl;
+
+  if (attrs) {
+    *attrs = decoded_attrs;
+    if (!rule || *rule != nullptr) {
+      // placement was cached; don't actually read
+      return 0;
+    }
+  }
+
+  // Now decode the placement rule
+  decode(upload_info, iter);
+  placement = upload_info.dest_placement;
+  *rule = &placement;
+
+  return 0;
+}
+
+std::unique_ptr<Writer> DaosMultipartUpload::get_writer(
+    const DoutPrefixProvider* dpp, optional_yield y,
+    std::unique_ptr<rgw::sal::Object> _head_obj, const rgw_user& owner,
+    const rgw_placement_rule* ptail_placement_rule, uint64_t part_num,
+    const std::string& part_num_str) {
+  ldpp_dout(dpp, 20) << "DaosMultipartUpload::get_writer(): enter part="
+                     << part_num << " head_obj=" << _head_obj << dendl;
+  return std::make_unique<DaosMultipartWriter>(
+      dpp, y, this, std::move(_head_obj), store, owner, ptail_placement_rule,
+      part_num, part_num_str);
+}
+
+DaosMultipartWriter::~DaosMultipartWriter() {
+  if (is_open()) ds3_part_close(ds3p);
+}
+
+int DaosMultipartWriter::prepare(optional_yield y) {
+  ldpp_dout(dpp, 20) << "DaosMultipartWriter::prepare(): enter part="
+                     << part_num_str << dendl;
+  int ret = ds3_part_open(get_bucket_name().c_str(), upload_id.c_str(),
+                          part_num, true, &ds3p, store->ds3);
+  if (ret == -ENOENT) {
+    ret = -ERR_NO_SUCH_UPLOAD;
+  }
+  return ret;
+}
+
+const std::string& DaosMultipartWriter::get_bucket_name() {
+  return static_cast<DaosMultipartUpload*>(upload)->get_bucket_name();
+}
+
+int DaosMultipartWriter::process(bufferlist&& data, uint64_t offset) {
+  ldpp_dout(dpp, 20) << "DaosMultipartWriter::process(): enter part="
+                     << part_num_str << " offset=" << offset << dendl;
+  if (data.length() == 0) {
+    return 0;
+  }
+
+  uint64_t size = data.length();
+  int ret =
+      ds3_part_write(data.c_str(), offset, &size, ds3p, store->ds3, nullptr);
+  if (ret == 0) {
+    // XXX: Combine multiple streams into one as motr does
+    actual_part_size += size;
+  } else {
+    ldpp_dout(dpp, 0) << "ERROR: failed to write into part ("
+                      << get_bucket_name() << ", " << upload_id << ", "
+                      << part_num << "): ret=" << ret << dendl;
+  }
+  return ret;
+}
+
+int DaosMultipartWriter::complete(
+    size_t accounted_size, const std::string& etag, ceph::real_time* mtime,
+    ceph::real_time set_mtime, std::map<std::string, bufferlist>& attrs,
+    ceph::real_time delete_at, const char* if_match, const char* if_nomatch,
+    const std::string* user_data, rgw_zone_set* zones_trace, bool* canceled,
+    optional_yield y) {
+  ldpp_dout(dpp, 20) << "DaosMultipartWriter::complete(): enter part="
+                     << part_num_str << dendl;
+
+  // Add an entry into part index
+  bufferlist bl;
+  RGWUploadPartInfo info;
+  info.num = part_num;
+  info.etag = etag;
+  info.size = actual_part_size;
+  info.accounted_size = accounted_size;
+  info.modified = real_clock::now();
+
+  bool compressed;
+  int ret = rgw_compression_info_from_attrset(attrs, compressed, info.cs_info);
+  ldpp_dout(dpp, 20) << "DaosMultipartWriter::complete(): compression ret="
+                     << ret << dendl;
+  if (ret != 0) {
+    ldpp_dout(dpp, 1) << "cannot get compression info" << dendl;
+    return ret;
+  }
+  encode(info, bl);
+  encode(attrs, bl);
+  ldpp_dout(dpp, 20) << "DaosMultipartWriter::complete(): entry size"
+                     << bl.length() << dendl;
+
+  struct ds3_multipart_part_info part_info = {.part_num = part_num,
+                                              .encoded = bl.c_str(),
+                                              .encoded_length = bl.length()};
+
+  ret = ds3_part_set_info(&part_info, ds3p, store->ds3, nullptr);
+
+  if (ret != 0) {
+    ldpp_dout(dpp, 0) << "ERROR: failed to set part info (" << get_bucket_name()
+                      << ", " << upload_id << ", " << part_num
+                      << "): ret=" << ret << dendl;
+    if (ret == ENOENT) {
+      ret = -ERR_NO_SUCH_UPLOAD;
+    }
+  }
+
+  return ret;
+}
+
+std::unique_ptr<RGWRole> DaosStore::get_role(
+    std::string name, std::string tenant, std::string path,
+    std::string trust_policy, std::string max_session_duration_str,
+    std::multimap<std::string, std::string> tags) {
+  RGWRole* p = nullptr;
+  return std::unique_ptr<RGWRole>(p);
+}
+
+std::unique_ptr<RGWRole> DaosStore::get_role(const RGWRoleInfo& info) {
+  RGWRole* p = nullptr;
+  return std::unique_ptr<RGWRole>(p);
+}
+
+std::unique_ptr<RGWRole> DaosStore::get_role(std::string id) {
+  RGWRole* p = nullptr;
+  return std::unique_ptr<RGWRole>(p);
+}
+
+int DaosStore::get_roles(const DoutPrefixProvider* dpp, optional_yield y,
+                         const std::string& path_prefix,
+                         const std::string& tenant,
+                         vector<std::unique_ptr<RGWRole>>& roles) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+std::unique_ptr<RGWOIDCProvider> DaosStore::get_oidc_provider() {
+  RGWOIDCProvider* p = nullptr;
+  return std::unique_ptr<RGWOIDCProvider>(p);
+}
+
+int DaosStore::get_oidc_providers(
+    const DoutPrefixProvider* dpp, const std::string& tenant,
+    vector<std::unique_ptr<RGWOIDCProvider>>& providers) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+std::unique_ptr<MultipartUpload> DaosBucket::get_multipart_upload(
+    const std::string& oid, std::optional<std::string> upload_id,
+    ACLOwner owner, ceph::real_time mtime) {
+  return std::make_unique<DaosMultipartUpload>(store, this, oid, upload_id,
+                                               owner, mtime);
+}
+
+std::unique_ptr<Writer> DaosStore::get_append_writer(
+    const DoutPrefixProvider* dpp, optional_yield y,
+    std::unique_ptr<rgw::sal::Object> _head_obj, const rgw_user& owner,
+    const rgw_placement_rule* ptail_placement_rule,
+    const std::string& unique_tag, uint64_t position,
+    uint64_t* cur_accounted_size) {
+  DAOS_NOT_IMPLEMENTED_LOG(dpp);
+  return nullptr;
+}
+
+std::unique_ptr<Writer> DaosStore::get_atomic_writer(
+    const DoutPrefixProvider* dpp, optional_yield y,
+    std::unique_ptr<rgw::sal::Object> _head_obj, const rgw_user& owner,
+    const rgw_placement_rule* ptail_placement_rule, uint64_t olh_epoch,
+    const std::string& unique_tag) {
+  ldpp_dout(dpp, 20) << "get_atomic_writer" << dendl;
+  return std::make_unique<DaosAtomicWriter>(dpp, y, std::move(_head_obj), this,
+                                            owner, ptail_placement_rule,
+                                            olh_epoch, unique_tag);
+}
+
+const std::string& DaosStore::get_compression_type(
+    const rgw_placement_rule& rule) {
+  return zone.zone_params->get_compression_type(rule);
+}
+
+bool DaosStore::valid_placement(const rgw_placement_rule& rule) {
+  return zone.zone_params->valid_placement(rule);
+}
+
+std::unique_ptr<User> DaosStore::get_user(const rgw_user& u) {
+  ldout(cctx, 20) << "DEBUG: bucket's user:  " << u.to_str() << dendl;
+  return std::make_unique<DaosUser>(this, u);
+}
+
+int DaosStore::get_user_by_access_key(const DoutPrefixProvider* dpp,
+                                      const std::string& key, optional_yield y,
+                                      std::unique_ptr<User>* user) {
+  // Initialize ds3_user_info
+  bufferlist bl;
+  uint64_t size = DS3_MAX_ENCODED_LEN;
+  struct ds3_user_info user_info = {.encoded = bl.append_hole(size).c_str(),
+                                    .encoded_length = size};
+
+  int ret = ds3_user_get_by_key(key.c_str(), &user_info, ds3, nullptr);
+
+  if (ret != 0) {
+    ldpp_dout(dpp, 0) << "Error: ds3_user_get_by_key failed, key=" << key
+                      << " ret=" << ret << dendl;
+    return ret;
+  }
+
+  // Decode
+  DaosUserInfo duinfo;
+  bufferlist& blr = bl;
+  auto iter = blr.cbegin();
+  duinfo.decode(iter);
+
+  User* u = new DaosUser(this, duinfo.info);
+  if (!u) {
+    return -ENOMEM;
+  }
+
+  user->reset(u);
+  return 0;
+}
+
+int DaosStore::get_user_by_email(const DoutPrefixProvider* dpp,
+                                 const std::string& email, optional_yield y,
+                                 std::unique_ptr<User>* user) {
+  // Initialize ds3_user_info
+  bufferlist bl;
+  uint64_t size = DS3_MAX_ENCODED_LEN;
+  struct ds3_user_info user_info = {.encoded = bl.append_hole(size).c_str(),
+                                    .encoded_length = size};
+
+  int ret = ds3_user_get_by_email(email.c_str(), &user_info, ds3, nullptr);
+
+  if (ret != 0) {
+    ldpp_dout(dpp, 0) << "Error: ds3_user_get_by_email failed, email=" << email
+                      << " ret=" << ret << dendl;
+    return ret;
+  }
+
+  // Decode
+  DaosUserInfo duinfo;
+  bufferlist& blr = bl;
+  auto iter = blr.cbegin();
+  duinfo.decode(iter);
+
+  User* u = new DaosUser(this, duinfo.info);
+  if (!u) {
+    return -ENOMEM;
+  }
+
+  user->reset(u);
+  return 0;
+}
+
+int DaosStore::get_user_by_swift(const DoutPrefixProvider* dpp,
+                                 const std::string& user_str, optional_yield y,
+                                 std::unique_ptr<User>* user) {
+  /* Swift keys and subusers are not supported for now */
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+std::unique_ptr<Object> DaosStore::get_object(const rgw_obj_key& k) {
+  return std::make_unique<DaosObject>(this, k);
+}
+
+inline std::ostream& operator<<(std::ostream& out, const rgw_user* u) {
+  std::string s;
+  if (u != nullptr)
+    u->to_str(s);
+  else
+    s = "(nullptr)";
+  return out << s;
+}
+
+int DaosStore::get_bucket(const DoutPrefixProvider* dpp, User* u,
+                          const rgw_bucket& b, std::unique_ptr<Bucket>* bucket,
+                          optional_yield y) {
+  ldpp_dout(dpp, 20) << "DEBUG: get_bucket1: User: " << u << dendl;
+  int ret;
+  Bucket* bp;
+
+  bp = new DaosBucket(this, b, u);
+  ret = bp->load_bucket(dpp, y);
+  if (ret != 0) {
+    delete bp;
+    return ret;
+  }
+
+  bucket->reset(bp);
+  return 0;
+}
+
+int DaosStore::get_bucket(User* u, const RGWBucketInfo& i,
+                          std::unique_ptr<Bucket>* bucket) {
+  DaosBucket* bp;
+
+  bp = new DaosBucket(this, i, u);
+  /* Don't need to fetch the bucket info, use the provided one */
+
+  bucket->reset(bp);
+  return 0;
+}
+
+int DaosStore::get_bucket(const DoutPrefixProvider* dpp, User* u,
+                          const std::string& tenant, const std::string& name,
+                          std::unique_ptr<Bucket>* bucket, optional_yield y) {
+  ldpp_dout(dpp, 20) << "get_bucket" << dendl;
+  rgw_bucket b;
+
+  b.tenant = tenant;
+  b.name = name;
+
+  return get_bucket(dpp, u, b, bucket, y);
+}
+
+bool DaosStore::is_meta_master() { return true; }
+
+int DaosStore::forward_request_to_master(const DoutPrefixProvider* dpp,
+                                         User* user, obj_version* objv,
+                                         bufferlist& in_data, JSONParser* jp,
+                                         req_info& info, optional_yield y) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosStore::forward_iam_request_to_master(const DoutPrefixProvider* dpp,
+                                             const RGWAccessKey& key,
+                                             obj_version* objv,
+                                             bufferlist& in_data,
+                                             RGWXMLDecoder::XMLParser* parser,
+                                             req_info& info, optional_yield y) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+std::string DaosStore::zone_unique_id(uint64_t unique_num) { return ""; }
+
+std::string DaosStore::zone_unique_trans_id(const uint64_t unique_num) {
+  return "";
+}
+
+int DaosStore::cluster_stat(RGWClusterStat& stats) {
+  return DAOS_NOT_IMPLEMENTED_LOG(nullptr);
+}
+
+std::unique_ptr<Lifecycle> DaosStore::get_lifecycle(void) {
+  DAOS_NOT_IMPLEMENTED_LOG(nullptr);
+  return 0;
+}
+
+std::unique_ptr<Completions> DaosStore::get_completions(void) {
+  DAOS_NOT_IMPLEMENTED_LOG(nullptr);
+  return 0;
+}
+
+std::unique_ptr<Notification> DaosStore::get_notification(
+    rgw::sal::Object* obj, rgw::sal::Object* src_obj, struct req_state* s,
+    rgw::notify::EventType event_type, const std::string* object_name) {
+  return std::make_unique<DaosNotification>(obj, src_obj, event_type);
+}
+
+std::unique_ptr<Notification> DaosStore::get_notification(
+    const DoutPrefixProvider* dpp, Object* obj, Object* src_obj,
+    rgw::notify::EventType event_type, rgw::sal::Bucket* _bucket,
+    std::string& _user_id, std::string& _user_tenant, std::string& _req_id,
+    optional_yield y) {
+  ldpp_dout(dpp, 20) << "get_notification" << dendl;
+  return std::make_unique<DaosNotification>(obj, src_obj, event_type);
+}
+
+int DaosStore::log_usage(const DoutPrefixProvider* dpp,
+                         map<rgw_user_bucket, RGWUsageBatch>& usage_info) {
+  DAOS_NOT_IMPLEMENTED_LOG(dpp);
+  return 0;
+}
+
+int DaosStore::log_op(const DoutPrefixProvider* dpp, string& oid,
+                      bufferlist& bl) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosStore::register_to_service_map(const DoutPrefixProvider* dpp,
+                                       const string& daemon_type,
+                                       const map<string, string>& meta) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+void DaosStore::get_quota(RGWQuota& quota) {
+  // XXX: Not handled for the first pass
+  return;
+}
+
+void DaosStore::get_ratelimit(RGWRateLimitInfo& bucket_ratelimit,
+                              RGWRateLimitInfo& user_ratelimit,
+                              RGWRateLimitInfo& anon_ratelimit) {
+  return;
+}
+
+int DaosStore::set_buckets_enabled(const DoutPrefixProvider* dpp,
+                                   std::vector<rgw_bucket>& buckets,
+                                   bool enabled) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosStore::get_sync_policy_handler(const DoutPrefixProvider* dpp,
+                                       std::optional<rgw_zone_id> zone,
+                                       std::optional<rgw_bucket> bucket,
+                                       RGWBucketSyncPolicyHandlerRef* phandler,
+                                       optional_yield y) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+RGWDataSyncStatusManager* DaosStore::get_data_sync_manager(
+    const rgw_zone_id& source_zone) {
+  DAOS_NOT_IMPLEMENTED_LOG(nullptr);
+  return 0;
+}
+
+int DaosStore::read_all_usage(
+    const DoutPrefixProvider* dpp, uint64_t start_epoch, uint64_t end_epoch,
+    uint32_t max_entries, bool* is_truncated, RGWUsageIter& usage_iter,
+    map<rgw_user_bucket, rgw_usage_log_entry>& usage) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosStore::trim_all_usage(const DoutPrefixProvider* dpp,
+                              uint64_t start_epoch, uint64_t end_epoch) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosStore::get_config_key_val(string name, bufferlist* bl) {
+  return DAOS_NOT_IMPLEMENTED_LOG(nullptr);
+}
+
+int DaosStore::meta_list_keys_init(const DoutPrefixProvider* dpp,
+                                   const string& section, const string& marker,
+                                   void** phandle) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosStore::meta_list_keys_next(const DoutPrefixProvider* dpp, void* handle,
+                                   int max, list<string>& keys,
+                                   bool* truncated) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+void DaosStore::meta_list_keys_complete(void* handle) { return; }
+
+std::string DaosStore::meta_get_marker(void* handle) { return ""; }
+
+int DaosStore::meta_remove(const DoutPrefixProvider* dpp, string& metadata_key,
+                           optional_yield y) {
+  return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+std::string DaosStore::get_cluster_id(const DoutPrefixProvider* dpp,
+                                      optional_yield y) {
+  DAOS_NOT_IMPLEMENTED_LOG(dpp);
+  return "";
+}
+
+}  // namespace rgw::sal
+
+extern "C" {
+
+void* newDaosStore(CephContext* cct) {
+  return new rgw::sal::DaosStore(cct);
+}
+}
diff --git a/src/rgw/rgw_sal_daos.h b/src/rgw/rgw_sal_daos.h
new file mode 100644 (file)
index 0000000..910ddea
--- /dev/null
@@ -0,0 +1,1052 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=2 sw=2 expandtab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * SAL implementation for the CORTX Daos backend
+ *
+ * Copyright (C) 2022 Seagate Technology LLC and/or its Affiliates
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#pragma once
+
+#include <daos.h>
+#include <daos_s3.h>
+#include <uuid/uuid.h>
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "rgw_multi.h"
+#include "rgw_notify.h"
+#include "rgw_oidc_provider.h"
+#include "rgw_putobj_processor.h"
+#include "rgw_rados.h"
+#include "rgw_role.h"
+#include "rgw_sal_store.h"
+
+inline bool IsDebuggerAttached() {
+#ifdef DEBUG
+  char buf[4096];
+
+  const int status_fd = ::open("/proc/self/status", O_RDONLY);
+  if (status_fd == -1) return false;
+
+  const ssize_t num_read = ::read(status_fd, buf, sizeof(buf) - 1);
+  ::close(status_fd);
+
+  if (num_read <= 0) return false;
+
+  buf[num_read] = '\0';
+  constexpr char tracerPidString[] = "TracerPid:";
+  const auto tracer_pid_ptr = ::strstr(buf, tracerPidString);
+  if (!tracer_pid_ptr) return false;
+
+  for (const char* characterPtr = tracer_pid_ptr + sizeof(tracerPidString) - 1;
+       characterPtr <= buf + num_read; ++characterPtr) {
+    if (::isspace(*characterPtr))
+      continue;
+    else
+      return ::isdigit(*characterPtr) != 0 && *characterPtr != '0';
+  }
+#endif  // DEBUG
+  return false;
+}
+
+inline void DebugBreak() {
+#ifdef DEBUG
+  // only break into the debugger if the debugger is attached
+  if (IsDebuggerAttached())
+    raise(SIGINT);  // breaks into GDB and stops, can be continued
+#endif              // DEBUG
+}
+
+inline int NotImplementedLog(const DoutPrefixProvider* ldpp,
+                             const char* filename, int linenumber,
+                             const char* functionname) {
+  if (ldpp)
+    ldpp_dout(ldpp, 20) << filename << "(" << linenumber << ") " << functionname
+                        << ": Not implemented" << dendl;
+  return 0;
+}
+
+inline int NotImplementedGdbBreak(const DoutPrefixProvider* ldpp,
+                                  const char* filename, int linenumber,
+                                  const char* functionname) {
+  NotImplementedLog(ldpp, filename, linenumber, functionname);
+  DebugBreak();
+  return 0;
+}
+
+#define DAOS_NOT_IMPLEMENTED_GDB_BREAK(ldpp) \
+  NotImplementedGdbBreak(ldpp, __FILE__, __LINE__, __FUNCTION__)
+#define DAOS_NOT_IMPLEMENTED_LOG(ldpp) \
+  NotImplementedLog(ldpp, __FILE__, __LINE__, __FUNCTION__)
+
+namespace rgw::sal {
+
+class DaosStore;
+class DaosObject;
+
+#ifdef DEBUG
+// Prepends each log entry with the "filename(source_line) function_name". Makes
+// it simple to
+//  associate log entries with the source that generated the log entry
+#undef ldpp_dout
+#define ldpp_dout(dpp, v)                                                     \
+  if (decltype(auto) pdpp = (dpp);                                            \
+      pdpp) /* workaround -Wnonnull-compare for 'this' */                     \
+  dout_impl(pdpp->get_cct(), ceph::dout::need_dynamic(pdpp->get_subsys()), v) \
+          pdpp->gen_prefix(*_dout)                                            \
+      << __FILE__ << "(" << __LINE__ << ") " << __FUNCTION__ << " - "
+#endif  // DEBUG
+
+struct DaosUserInfo {
+  RGWUserInfo info;
+  obj_version user_version;
+  rgw::sal::Attrs attrs;
+
+  void encode(bufferlist& bl) const {
+    ENCODE_START(3, 3, bl);
+    encode(info, bl);
+    encode(user_version, bl);
+    encode(attrs, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(bufferlist::const_iterator& bl) {
+    DECODE_START(3, bl);
+    decode(info, bl);
+    decode(user_version, bl);
+    decode(attrs, bl);
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(DaosUserInfo);
+
+class DaosNotification : public StoreNotification {
+ public:
+  DaosNotification(Object* _obj, Object* _src_obj, rgw::notify::EventType _type)
+      : StoreNotification(_obj, _src_obj, _type) {}
+  ~DaosNotification() = default;
+
+  virtual int publish_reserve(const DoutPrefixProvider* dpp,
+                              RGWObjTags* obj_tags = nullptr) override {
+    return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+  }
+  virtual int publish_commit(const DoutPrefixProvider* dpp, uint64_t size,
+                             const ceph::real_time& mtime,
+                             const std::string& etag,
+                             const std::string& version) override {
+    return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+  }
+};
+
+class DaosUser : public StoreUser {
+ private:
+  DaosStore* store;
+  std::vector<const char*> access_ids;
+
+ public:
+  DaosUser(DaosStore* _st, const rgw_user& _u) : StoreUser(_u), store(_st) {}
+  DaosUser(DaosStore* _st, const RGWUserInfo& _i) : StoreUser(_i), store(_st) {}
+  DaosUser(DaosStore* _st) : store(_st) {}
+  DaosUser(DaosUser& _o) = default;
+  DaosUser() {}
+
+  virtual std::unique_ptr<User> clone() override {
+    return std::make_unique<DaosUser>(*this);
+  }
+  int list_buckets(const DoutPrefixProvider* dpp, const std::string& marker,
+                   const std::string& end_marker, uint64_t max, bool need_stats,
+                   BucketList& buckets, optional_yield y) override;
+  virtual int create_bucket(
+      const DoutPrefixProvider* dpp, const rgw_bucket& b,
+      const std::string& zonegroup_id, rgw_placement_rule& placement_rule,
+      std::string& swift_ver_location, const RGWQuotaInfo* pquota_info,
+      const RGWAccessControlPolicy& policy, Attrs& attrs, RGWBucketInfo& info,
+      obj_version& ep_objv, bool exclusive, bool obj_lock_enabled,
+      bool* existed, req_info& req_info, std::unique_ptr<Bucket>* bucket,
+      optional_yield y) override;
+  virtual int read_attrs(const DoutPrefixProvider* dpp,
+                         optional_yield y) override;
+  virtual int merge_and_store_attrs(const DoutPrefixProvider* dpp,
+                                    Attrs& new_attrs,
+                                    optional_yield y) override;
+  virtual int read_stats(const DoutPrefixProvider* dpp, optional_yield y,
+                         RGWStorageStats* stats,
+                         ceph::real_time* last_stats_sync = nullptr,
+                         ceph::real_time* last_stats_update = nullptr) override;
+  virtual int read_stats_async(const DoutPrefixProvider* dpp,
+                               RGWGetUserStats_CB* cb) override;
+  virtual int complete_flush_stats(const DoutPrefixProvider* dpp,
+                                   optional_yield y) override;
+  virtual int read_usage(
+      const DoutPrefixProvider* dpp, uint64_t start_epoch, uint64_t end_epoch,
+      uint32_t max_entries, bool* is_truncated, RGWUsageIter& usage_iter,
+      std::map<rgw_user_bucket, rgw_usage_log_entry>& usage) override;
+  virtual int trim_usage(const DoutPrefixProvider* dpp, uint64_t start_epoch,
+                         uint64_t end_epoch) override;
+
+  virtual int load_user(const DoutPrefixProvider* dpp,
+                        optional_yield y) override;
+  virtual int store_user(const DoutPrefixProvider* dpp, optional_yield y,
+                         bool exclusive,
+                         RGWUserInfo* old_info = nullptr) override;
+  virtual int remove_user(const DoutPrefixProvider* dpp,
+                          optional_yield y) override;
+
+  /** Read user info without loading it */
+  int read_user(const DoutPrefixProvider* dpp, std::string name,
+                DaosUserInfo* duinfo);
+
+  std::unique_ptr<struct ds3_user_info> get_encoded_info(bufferlist& bl,
+                                                         obj_version& obj_ver);
+
+  friend class DaosBucket;
+};
+
+// RGWBucketInfo and other information that are shown when listing a bucket is
+// represented in struct DaosBucketInfo. The structure is encoded and stored
+// as the value of the global bucket instance index.
+// TODO: compare pros and cons of separating the bucket_attrs (ACLs, tag etc.)
+// into a different index.
+struct DaosBucketInfo {
+  RGWBucketInfo info;
+
+  obj_version bucket_version;
+  ceph::real_time mtime;
+
+  rgw::sal::Attrs bucket_attrs;
+
+  void encode(bufferlist& bl) const {
+    ENCODE_START(4, 4, bl);
+    encode(info, bl);
+    encode(bucket_version, bl);
+    encode(mtime, bl);
+    encode(bucket_attrs, bl);  // rgw_cache.h example for a map
+    ENCODE_FINISH(bl);
+  }
+
+  void decode(bufferlist::const_iterator& bl) {
+    DECODE_START(4, bl);
+    decode(info, bl);
+    decode(bucket_version, bl);
+    decode(mtime, bl);
+    decode(bucket_attrs, bl);
+    DECODE_FINISH(bl);
+  }
+};
+WRITE_CLASS_ENCODER(DaosBucketInfo);
+
+class DaosBucket : public StoreBucket {
+ private:
+  DaosStore* store;
+  RGWAccessControlPolicy acls;
+
+ public:
+  /** Container ds3b handle */
+  ds3_bucket_t* ds3b = nullptr;
+
+  DaosBucket(DaosStore* _st) : store(_st), acls() {}
+
+  DaosBucket(const DaosBucket& _daos_bucket)
+      : store(_daos_bucket.store), acls(), ds3b(nullptr) {
+    // TODO: deep copy all objects
+  }
+
+  DaosBucket(DaosStore* _st, User* _u) : StoreBucket(_u), store(_st), acls() {}
+
+  DaosBucket(DaosStore* _st, const rgw_bucket& _b)
+      : StoreBucket(_b), store(_st), acls() {}
+
+  DaosBucket(DaosStore* _st, const RGWBucketEnt& _e)
+      : StoreBucket(_e), store(_st), acls() {}
+
+  DaosBucket(DaosStore* _st, const RGWBucketInfo& _i)
+      : StoreBucket(_i), store(_st), acls() {}
+
+  DaosBucket(DaosStore* _st, const rgw_bucket& _b, User* _u)
+      : StoreBucket(_b, _u), store(_st), acls() {}
+
+  DaosBucket(DaosStore* _st, const RGWBucketEnt& _e, User* _u)
+      : StoreBucket(_e, _u), store(_st), acls() {}
+
+  DaosBucket(DaosStore* _st, const RGWBucketInfo& _i, User* _u)
+      : StoreBucket(_i, _u), store(_st), acls() {}
+
+  ~DaosBucket();
+
+  virtual std::unique_ptr<Object> get_object(const rgw_obj_key& k) override;
+  virtual int list(const DoutPrefixProvider* dpp, ListParams&, int,
+                   ListResults&, optional_yield y) override;
+  virtual int remove_bucket(const DoutPrefixProvider* dpp, bool delete_children,
+                            bool forward_to_master, req_info* req_info,
+                            optional_yield y) override;
+  virtual int remove_bucket_bypass_gc(int concurrent_max,
+                                      bool keep_index_consistent,
+                                      optional_yield y,
+                                      const DoutPrefixProvider* dpp) override;
+  virtual RGWAccessControlPolicy& get_acl(void) override { return acls; }
+  virtual int set_acl(const DoutPrefixProvider* dpp,
+                      RGWAccessControlPolicy& acl, optional_yield y) override;
+  virtual int load_bucket(const DoutPrefixProvider* dpp, optional_yield y,
+                          bool get_stats = false) override;
+  virtual int read_stats(const DoutPrefixProvider* dpp,
+                         const bucket_index_layout_generation& idx_layout,
+                         int shard_id, std::string* bucket_ver,
+                         std::string* master_ver,
+                         std::map<RGWObjCategory, RGWStorageStats>& stats,
+                         std::string* max_marker = nullptr,
+                         bool* syncstopped = nullptr) override;
+  virtual int read_stats_async(const DoutPrefixProvider* dpp,
+                               const bucket_index_layout_generation& idx_layout,
+                               int shard_id,
+                               RGWGetBucketStats_CB* ctx) override;
+  virtual int sync_user_stats(const DoutPrefixProvider* dpp,
+                              optional_yield y) override;
+  virtual int update_container_stats(const DoutPrefixProvider* dpp) override;
+  virtual int check_bucket_shards(const DoutPrefixProvider* dpp) override;
+  virtual int chown(const DoutPrefixProvider* dpp, User* new_user,
+                    User* old_user, optional_yield y,
+                    const std::string* marker = nullptr) override;
+  virtual int put_info(const DoutPrefixProvider* dpp, bool exclusive,
+                       ceph::real_time mtime) override;
+  virtual bool is_owner(User* user) override;
+  virtual int check_empty(const DoutPrefixProvider* dpp,
+                          optional_yield y) override;
+  virtual int check_quota(const DoutPrefixProvider* dpp, RGWQuota& quota,
+                          uint64_t obj_size, optional_yield y,
+                          bool check_size_only = false) override;
+  virtual int merge_and_store_attrs(const DoutPrefixProvider* dpp, Attrs& attrs,
+                                    optional_yield y) override;
+  virtual int try_refresh_info(const DoutPrefixProvider* dpp,
+                               ceph::real_time* pmtime) override;
+  virtual int read_usage(
+      const DoutPrefixProvider* dpp, uint64_t start_epoch, uint64_t end_epoch,
+      uint32_t max_entries, bool* is_truncated, RGWUsageIter& usage_iter,
+      std::map<rgw_user_bucket, rgw_usage_log_entry>& usage) override;
+  virtual int trim_usage(const DoutPrefixProvider* dpp, uint64_t start_epoch,
+                         uint64_t end_epoch) override;
+  virtual int remove_objs_from_index(
+      const DoutPrefixProvider* dpp,
+      std::list<rgw_obj_index_key>& objs_to_unlink) override;
+  virtual int check_index(
+      const DoutPrefixProvider* dpp,
+      std::map<RGWObjCategory, RGWStorageStats>& existing_stats,
+      std::map<RGWObjCategory, RGWStorageStats>& calculated_stats) override;
+  virtual int rebuild_index(const DoutPrefixProvider* dpp) override;
+  virtual int set_tag_timeout(const DoutPrefixProvider* dpp,
+                              uint64_t timeout) override;
+  virtual int purge_instance(const DoutPrefixProvider* dpp) override;
+  virtual std::unique_ptr<Bucket> clone() override {
+    return std::make_unique<DaosBucket>(*this);
+  }
+  virtual std::unique_ptr<MultipartUpload> get_multipart_upload(
+      const std::string& oid,
+      std::optional<std::string> upload_id = std::nullopt, ACLOwner owner = {},
+      ceph::real_time mtime = real_clock::now()) override;
+  virtual int list_multiparts(
+      const DoutPrefixProvider* dpp, const std::string& prefix,
+      std::string& marker, const std::string& delim, const int& max_uploads,
+      std::vector<std::unique_ptr<MultipartUpload>>& uploads,
+      std::map<std::string, bool>* common_prefixes,
+      bool* is_truncated) override;
+  virtual int abort_multiparts(const DoutPrefixProvider* dpp,
+                               CephContext* cct) override;
+
+  int open(const DoutPrefixProvider* dpp);
+  int close(const DoutPrefixProvider* dpp);
+  bool is_open() { return ds3b != nullptr; }
+  std::unique_ptr<struct ds3_bucket_info> get_encoded_info(
+      bufferlist& bl, ceph::real_time mtime);
+
+  friend class DaosStore;
+};
+
+class DaosPlacementTier : public StorePlacementTier {
+  DaosStore* store;
+  RGWZoneGroupPlacementTier tier;
+
+ public:
+  DaosPlacementTier(DaosStore* _store, const RGWZoneGroupPlacementTier& _tier)
+      : store(_store), tier(_tier) {}
+  virtual ~DaosPlacementTier() = default;
+
+  virtual const std::string& get_tier_type() { return tier.tier_type; }
+  virtual const std::string& get_storage_class() { return tier.storage_class; }
+  virtual bool retain_head_object() { return tier.retain_head_object; }
+  RGWZoneGroupPlacementTier& get_rt() { return tier; }
+};
+
+class DaosZoneGroup : public StoreZoneGroup {
+  DaosStore* store;
+  const RGWZoneGroup group;
+  std::string empty;
+
+ public:
+  DaosZoneGroup(DaosStore* _store) : store(_store), group() {}
+  DaosZoneGroup(DaosStore* _store, const RGWZoneGroup& _group)
+      : store(_store), group(_group) {}
+  virtual ~DaosZoneGroup() = default;
+
+  virtual const std::string& get_id() const override { return group.get_id(); };
+  virtual const std::string& get_name() const override {
+    return group.get_name();
+  };
+  virtual int equals(const std::string& other_zonegroup) const override {
+    return group.equals(other_zonegroup);
+  };
+  /** Get the endpoint from zonegroup, or from master zone if not set */
+  virtual const std::string& get_endpoint() const override;
+  virtual bool placement_target_exists(std::string& target) const override;
+  virtual bool is_master_zonegroup() const override {
+    return group.is_master_zonegroup();
+  };
+  virtual const std::string& get_api_name() const override {
+    return group.api_name;
+  };
+  virtual int get_placement_target_names(
+      std::set<std::string>& names) const override;
+  virtual const std::string& get_default_placement_name() const override {
+    return group.default_placement.name;
+  };
+  virtual int get_hostnames(std::list<std::string>& names) const override {
+    names = group.hostnames;
+    return 0;
+  };
+  virtual int get_s3website_hostnames(
+      std::list<std::string>& names) const override {
+    names = group.hostnames_s3website;
+    return 0;
+  };
+  virtual int get_zone_count() const override { return group.zones.size(); }
+  virtual int get_placement_tier(const rgw_placement_rule& rule,
+                                 std::unique_ptr<PlacementTier>* tier);
+  virtual std::unique_ptr<ZoneGroup> clone() override {
+    return std::make_unique<DaosZoneGroup>(store, group);
+  }
+  const RGWZoneGroup& get_group() { return group; }
+};
+
+class DaosZone : public StoreZone {
+ protected:
+  DaosStore* store;
+  RGWRealm* realm{nullptr};
+  DaosZoneGroup zonegroup;
+  RGWZone* zone_public_config{
+      nullptr}; /* external zone params, e.g., entrypoints, log flags, etc. */
+  RGWZoneParams* zone_params{
+      nullptr}; /* internal zone params, e.g., rados pools */
+  RGWPeriod* current_period{nullptr};
+  rgw_zone_id cur_zone_id;
+
+ public:
+  DaosZone(DaosStore* _store) : store(_store), zonegroup(_store) {
+    realm = new RGWRealm();
+    zone_public_config = new RGWZone();
+    zone_params = new RGWZoneParams();
+    current_period = new RGWPeriod();
+    cur_zone_id = rgw_zone_id(zone_params->get_id());
+
+    // XXX: only default and STANDARD supported for now
+    RGWZonePlacementInfo info;
+    RGWZoneStorageClasses sc;
+    sc.set_storage_class("STANDARD", nullptr, nullptr);
+    info.storage_classes = sc;
+    zone_params->placement_pools["default"] = info;
+  }
+  DaosZone(DaosStore* _store, DaosZoneGroup _zg)
+      : store(_store), zonegroup(_zg) {
+    realm = new RGWRealm();
+    zone_public_config = new RGWZone();
+    zone_params = new RGWZoneParams();
+    current_period = new RGWPeriod();
+    cur_zone_id = rgw_zone_id(zone_params->get_id());
+
+    // XXX: only default and STANDARD supported for now
+    RGWZonePlacementInfo info;
+    RGWZoneStorageClasses sc;
+    sc.set_storage_class("STANDARD", nullptr, nullptr);
+    info.storage_classes = sc;
+    zone_params->placement_pools["default"] = info;
+  }
+  ~DaosZone() = default;
+
+  virtual std::unique_ptr<Zone> clone() override {
+    return std::make_unique<DaosZone>(store);
+  }
+  virtual ZoneGroup& get_zonegroup() override;
+  virtual int get_zonegroup(const std::string& id,
+                            std::unique_ptr<ZoneGroup>* zonegroup) override;
+  virtual const rgw_zone_id& get_id() override;
+  virtual const std::string& get_name() const override;
+  virtual bool is_writeable() override;
+  virtual bool get_redirect_endpoint(std::string* endpoint) override;
+  virtual bool has_zonegroup_api(const std::string& api) const override;
+  virtual const std::string& get_current_period_id() override;
+  virtual const RGWAccessKey& get_system_key() {
+    return zone_params->system_key;
+  }
+  virtual const std::string& get_realm_name() { return realm->get_name(); }
+  virtual const std::string& get_realm_id() { return realm->get_id(); }
+  virtual const std::string_view get_tier_type() { return "rgw"; }
+
+  friend class DaosStore;
+};
+
+class DaosLuaManager : public StoreLuaManager {
+  DaosStore* store;
+
+ public:
+  DaosLuaManager(DaosStore* _s) : store(_s) {}
+  virtual ~DaosLuaManager() = default;
+
+  virtual int get_script(const DoutPrefixProvider* dpp, optional_yield y,
+                         const std::string& key, std::string& script) override {
+    DAOS_NOT_IMPLEMENTED_LOG(dpp);
+    return -ENOENT;
+  };
+
+  virtual int put_script(const DoutPrefixProvider* dpp, optional_yield y,
+                         const std::string& key,
+                         const std::string& script) override {
+    DAOS_NOT_IMPLEMENTED_LOG(dpp);
+    return -ENOENT;
+  };
+
+  virtual int del_script(const DoutPrefixProvider* dpp, optional_yield y,
+                         const std::string& key) override {
+    DAOS_NOT_IMPLEMENTED_LOG(dpp);
+    return -ENOENT;
+  };
+
+  virtual int add_package(const DoutPrefixProvider* dpp, optional_yield y,
+                          const std::string& package_name) override {
+    DAOS_NOT_IMPLEMENTED_LOG(dpp);
+    return -ENOENT;
+  };
+
+  virtual int remove_package(const DoutPrefixProvider* dpp, optional_yield y,
+                             const std::string& package_name) override {
+    DAOS_NOT_IMPLEMENTED_LOG(dpp);
+    return -ENOENT;
+  };
+
+  virtual int list_packages(const DoutPrefixProvider* dpp, optional_yield y,
+                            rgw::lua::packages_t& packages) override {
+    DAOS_NOT_IMPLEMENTED_LOG(dpp);
+    return -ENOENT;
+  };
+};
+
+class DaosObject : public StoreObject {
+ private:
+  DaosStore* store;
+  RGWAccessControlPolicy acls;
+
+ public:
+  struct DaosReadOp : public StoreReadOp {
+   private:
+    DaosObject* source;
+
+   public:
+    DaosReadOp(DaosObject* _source);
+
+    virtual int prepare(optional_yield y,
+                        const DoutPrefixProvider* dpp) override;
+    virtual int read(int64_t off, int64_t end, bufferlist& bl, optional_yield y,
+                     const DoutPrefixProvider* dpp) override;
+    virtual int iterate(const DoutPrefixProvider* dpp, int64_t off, int64_t end,
+                        RGWGetDataCB* cb, optional_yield y) override;
+    virtual int get_attr(const DoutPrefixProvider* dpp, const char* name,
+                         bufferlist& dest, optional_yield y) override;
+  };
+
+  struct DaosDeleteOp : public StoreDeleteOp {
+   private:
+    DaosObject* source;
+
+   public:
+    DaosDeleteOp(DaosObject* _source);
+
+    virtual int delete_obj(const DoutPrefixProvider* dpp,
+                           optional_yield y) override;
+  };
+
+  ds3_obj_t* ds3o = nullptr;
+
+  DaosObject() = default;
+
+  DaosObject(DaosStore* _st, const rgw_obj_key& _k)
+      : StoreObject(_k), store(_st), acls() {}
+  DaosObject(DaosStore* _st, const rgw_obj_key& _k, Bucket* _b)
+      : StoreObject(_k, _b), store(_st), acls() {}
+
+  DaosObject(DaosObject& _o) = default;
+
+  virtual ~DaosObject();
+
+  virtual int delete_object(const DoutPrefixProvider* dpp, optional_yield y,
+                            bool prevent_versioning = false) override;
+  virtual int delete_obj_aio(const DoutPrefixProvider* dpp, RGWObjState* astate,
+                             Completions* aio, bool keep_index_consistent,
+                             optional_yield y) override;
+  virtual int copy_object(
+      User* user, req_info* info, const rgw_zone_id& source_zone,
+      rgw::sal::Object* dest_object, rgw::sal::Bucket* dest_bucket,
+      rgw::sal::Bucket* src_bucket, const rgw_placement_rule& dest_placement,
+      ceph::real_time* src_mtime, ceph::real_time* mtime,
+      const ceph::real_time* mod_ptr, const ceph::real_time* unmod_ptr,
+      bool high_precision_time, const char* if_match, const char* if_nomatch,
+      AttrsMod attrs_mod, bool copy_if_newer, Attrs& attrs,
+      RGWObjCategory category, uint64_t olh_epoch,
+      boost::optional<ceph::real_time> delete_at, std::string* version_id,
+      std::string* tag, std::string* etag, void (*progress_cb)(off_t, void*),
+      void* progress_data, const DoutPrefixProvider* dpp,
+      optional_yield y) override;
+  virtual RGWAccessControlPolicy& get_acl(void) override { return acls; }
+  virtual int set_acl(const RGWAccessControlPolicy& acl) override {
+    acls = acl;
+    return 0;
+  }
+
+  virtual int get_obj_state(const DoutPrefixProvider* dpp, RGWObjState** state,
+                            optional_yield y, bool follow_olh = true) override;
+  virtual int set_obj_attrs(const DoutPrefixProvider* dpp, Attrs* setattrs,
+                            Attrs* delattrs, optional_yield y) override;
+  virtual int get_obj_attrs(optional_yield y, const DoutPrefixProvider* dpp,
+                            rgw_obj* target_obj = NULL) override;
+  virtual int modify_obj_attrs(const char* attr_name, bufferlist& attr_val,
+                               optional_yield y,
+                               const DoutPrefixProvider* dpp) override;
+  virtual int delete_obj_attrs(const DoutPrefixProvider* dpp,
+                               const char* attr_name,
+                               optional_yield y) override;
+  virtual bool is_expired() override;
+  virtual void gen_rand_obj_instance_name() override;
+  virtual std::unique_ptr<Object> clone() override {
+    return std::make_unique<DaosObject>(*this);
+  }
+  virtual std::unique_ptr<MPSerializer> get_serializer(
+      const DoutPrefixProvider* dpp, const std::string& lock_name) override;
+  virtual int transition(Bucket* bucket,
+                         const rgw_placement_rule& placement_rule,
+                         const real_time& mtime, uint64_t olh_epoch,
+                         const DoutPrefixProvider* dpp,
+                         optional_yield y) override;
+  virtual int transition_to_cloud(Bucket* bucket, rgw::sal::PlacementTier* tier,
+                                  rgw_bucket_dir_entry& o,
+                                  std::set<std::string>& cloud_targets,
+                                  CephContext* cct, bool update_object,
+                                  const DoutPrefixProvider* dpp,
+                                  optional_yield y) override;
+  virtual bool placement_rules_match(rgw_placement_rule& r1,
+                                     rgw_placement_rule& r2) override;
+  virtual int dump_obj_layout(const DoutPrefixProvider* dpp, optional_yield y,
+                              Formatter* f) override;
+
+  /* Swift versioning */
+  virtual int swift_versioning_restore(bool& restored,
+                                       const DoutPrefixProvider* dpp) override;
+  virtual int swift_versioning_copy(const DoutPrefixProvider* dpp,
+                                    optional_yield y) override;
+
+  /* OPs */
+  virtual std::unique_ptr<ReadOp> get_read_op() override;
+  virtual std::unique_ptr<DeleteOp> get_delete_op() override;
+
+  /* OMAP */
+  virtual int omap_get_vals(const DoutPrefixProvider* dpp,
+                            const std::string& marker, uint64_t count,
+                            std::map<std::string, bufferlist>* m, bool* pmore,
+                            optional_yield y) override;
+  virtual int omap_get_all(const DoutPrefixProvider* dpp,
+                           std::map<std::string, bufferlist>* m,
+                           optional_yield y) override;
+  virtual int omap_get_vals_by_keys(const DoutPrefixProvider* dpp,
+                                    const std::string& oid,
+                                    const std::set<std::string>& keys,
+                                    Attrs* vals) override;
+  virtual int omap_set_val_by_key(const DoutPrefixProvider* dpp,
+                                  const std::string& key, bufferlist& val,
+                                  bool must_exist, optional_yield y) override;
+
+  bool is_open() { return ds3o != nullptr; };
+  // Only lookup the object, do not create
+  int lookup(const DoutPrefixProvider* dpp);
+  // Create the object, truncate if exists
+  int create(const DoutPrefixProvider* dpp);
+  // Release the daos resources
+  int close(const DoutPrefixProvider* dpp);
+  // Write to object starting from offset
+  int write(const DoutPrefixProvider* dpp, bufferlist&& data, uint64_t offset);
+  // Read size bytes from object starting from offset
+  int read(const DoutPrefixProvider* dpp, bufferlist& data, uint64_t offset,
+           uint64_t& size);
+  // Get the object's dirent and attrs
+  int get_dir_entry_attrs(const DoutPrefixProvider* dpp,
+                          rgw_bucket_dir_entry* ent, Attrs* getattrs = nullptr);
+  // Set the object's dirent and attrs
+  int set_dir_entry_attrs(const DoutPrefixProvider* dpp,
+                          rgw_bucket_dir_entry* ent, Attrs* setattrs = nullptr);
+  // Marks this DAOS object as being the latest version and unmarks all other
+  // versions as latest
+  int mark_as_latest(const DoutPrefixProvider* dpp, ceph::real_time set_mtime);
+  // get_bucket casted as DaosBucket*
+  DaosBucket* get_daos_bucket() {
+    return static_cast<DaosBucket*>(get_bucket());
+  }
+};
+
+// A placeholder locking class for multipart upload.
+class MPDaosSerializer : public StoreMPSerializer {
+ public:
+  MPDaosSerializer(const DoutPrefixProvider* dpp, DaosStore* store,
+                   DaosObject* obj, const std::string& lock_name) {}
+
+  virtual int try_lock(const DoutPrefixProvider* dpp, utime_t dur,
+                       optional_yield y) override {
+    return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+  }
+  virtual int unlock() override { return DAOS_NOT_IMPLEMENTED_LOG(nullptr); }
+};
+
+class DaosAtomicWriter : public StoreWriter {
+ protected:
+  rgw::sal::DaosStore* store;
+  const rgw_user& owner;
+  const rgw_placement_rule* ptail_placement_rule;
+  uint64_t olh_epoch;
+  const std::string& unique_tag;
+  DaosObject obj;
+  uint64_t total_data_size = 0;  // for total data being uploaded
+
+ public:
+  DaosAtomicWriter(const DoutPrefixProvider* dpp, optional_yield y,
+                   std::unique_ptr<rgw::sal::Object> _head_obj,
+                   DaosStore* _store, const rgw_user& _owner,
+                   const rgw_placement_rule* _ptail_placement_rule,
+                   uint64_t _olh_epoch, const std::string& _unique_tag);
+  ~DaosAtomicWriter() = default;
+
+  // prepare to start processing object data
+  virtual int prepare(optional_yield y) override;
+
+  // Process a bufferlist
+  virtual int process(bufferlist&& data, uint64_t offset) override;
+
+  // complete the operation and make its result visible to clients
+  virtual int complete(size_t accounted_size, const std::string& etag,
+                       ceph::real_time* mtime, ceph::real_time set_mtime,
+                       std::map<std::string, bufferlist>& attrs,
+                       ceph::real_time delete_at, const char* if_match,
+                       const char* if_nomatch, const std::string* user_data,
+                       rgw_zone_set* zones_trace, bool* canceled,
+                       optional_yield y) override;
+};
+
+class DaosMultipartWriter : public StoreWriter {
+ protected:
+  rgw::sal::DaosStore* store;
+  MultipartUpload* upload;
+  std::string upload_id;
+
+  // Part parameters.
+  const uint64_t part_num;
+  const std::string part_num_str;
+  uint64_t actual_part_size = 0;
+
+  ds3_part_t* ds3p = nullptr;
+  bool is_open() { return ds3p != nullptr; };
+
+ public:
+  DaosMultipartWriter(const DoutPrefixProvider* dpp, optional_yield y,
+                      MultipartUpload* _upload,
+                      std::unique_ptr<rgw::sal::Object> _head_obj,
+                      DaosStore* _store, const rgw_user& owner,
+                      const rgw_placement_rule* ptail_placement_rule,
+                      uint64_t _part_num, const std::string& part_num_str)
+      : StoreWriter(dpp, y),
+        store(_store),
+        upload(_upload),
+        upload_id(_upload->get_upload_id()),
+        part_num(_part_num),
+        part_num_str(part_num_str) {}
+  virtual ~DaosMultipartWriter();
+
+  // prepare to start processing object data
+  virtual int prepare(optional_yield y) override;
+
+  // Process a bufferlist
+  virtual int process(bufferlist&& data, uint64_t offset) override;
+
+  // complete the operation and make its result visible to clients
+  virtual int complete(size_t accounted_size, const std::string& etag,
+                       ceph::real_time* mtime, ceph::real_time set_mtime,
+                       std::map<std::string, bufferlist>& attrs,
+                       ceph::real_time delete_at, const char* if_match,
+                       const char* if_nomatch, const std::string* user_data,
+                       rgw_zone_set* zones_trace, bool* canceled,
+                       optional_yield y) override;
+
+  const std::string& get_bucket_name();
+};
+
+class DaosMultipartPart : public StoreMultipartPart {
+ protected:
+  RGWUploadPartInfo info;
+
+ public:
+  DaosMultipartPart() = default;
+  virtual ~DaosMultipartPart() = default;
+
+  virtual uint32_t get_num() { return info.num; }
+  virtual uint64_t get_size() { return info.accounted_size; }
+  virtual const std::string& get_etag() { return info.etag; }
+  virtual ceph::real_time& get_mtime() { return info.modified; }
+
+  friend class DaosMultipartUpload;
+};
+
+class DaosMultipartUpload : public StoreMultipartUpload {
+  DaosStore* store;
+  RGWMPObj mp_obj;
+  ACLOwner owner;
+  ceph::real_time mtime;
+  rgw_placement_rule placement;
+  RGWObjManifest manifest;
+
+ public:
+  DaosMultipartUpload(DaosStore* _store, Bucket* _bucket,
+                      const std::string& oid,
+                      std::optional<std::string> upload_id, ACLOwner _owner,
+                      ceph::real_time _mtime)
+      : StoreMultipartUpload(_bucket),
+        store(_store),
+        mp_obj(oid, upload_id),
+        owner(_owner),
+        mtime(_mtime) {}
+  virtual ~DaosMultipartUpload() = default;
+
+  virtual const std::string& get_meta() const { return mp_obj.get_meta(); }
+  virtual const std::string& get_key() const { return mp_obj.get_key(); }
+  virtual const std::string& get_upload_id() const {
+    return mp_obj.get_upload_id();
+  }
+  virtual const ACLOwner& get_owner() const override { return owner; }
+  virtual ceph::real_time& get_mtime() { return mtime; }
+  virtual std::unique_ptr<rgw::sal::Object> get_meta_obj() override;
+  virtual int init(const DoutPrefixProvider* dpp, optional_yield y,
+                   ACLOwner& owner, rgw_placement_rule& dest_placement,
+                   rgw::sal::Attrs& attrs) override;
+  virtual int list_parts(const DoutPrefixProvider* dpp, CephContext* cct,
+                         int num_parts, int marker, int* next_marker,
+                         bool* truncated,
+                         bool assume_unsorted = false) override;
+  virtual int abort(const DoutPrefixProvider* dpp, CephContext* cct) override;
+  virtual int complete(const DoutPrefixProvider* dpp, optional_yield y,
+                       CephContext* cct, std::map<int, std::string>& part_etags,
+                       std::list<rgw_obj_index_key>& remove_objs,
+                       uint64_t& accounted_size, bool& compressed,
+                       RGWCompressionInfo& cs_info, off_t& off,
+                       std::string& tag, ACLOwner& owner, uint64_t olh_epoch,
+                       rgw::sal::Object* target_obj) override;
+  virtual int get_info(const DoutPrefixProvider* dpp, optional_yield y,
+                       rgw_placement_rule** rule,
+                       rgw::sal::Attrs* attrs = nullptr) override;
+  virtual std::unique_ptr<Writer> get_writer(
+      const DoutPrefixProvider* dpp, optional_yield y,
+      std::unique_ptr<rgw::sal::Object> _head_obj, const rgw_user& owner,
+      const rgw_placement_rule* ptail_placement_rule, uint64_t part_num,
+      const std::string& part_num_str) override;
+  const std::string& get_bucket_name() { return bucket->get_name(); }
+};
+
+class DaosStore : public StoreStore {
+ private:
+  std::string luarocks_path;
+  DaosZone zone;
+  RGWSyncModuleInstanceRef sync_module;
+
+ public:
+  ds3_t* ds3 = nullptr;
+
+  CephContext* cctx;
+
+  DaosStore(CephContext* c) : zone(this), cctx(c) {}
+  ~DaosStore() = default;
+
+  virtual const std::string get_name() const override { return "daos"; }
+
+  virtual std::unique_ptr<User> get_user(const rgw_user& u) override;
+  virtual std::string get_cluster_id(const DoutPrefixProvider* dpp,
+                                     optional_yield y) override;
+  virtual int get_user_by_access_key(const DoutPrefixProvider* dpp,
+                                     const std::string& key, optional_yield y,
+                                     std::unique_ptr<User>* user) override;
+  virtual int get_user_by_email(const DoutPrefixProvider* dpp,
+                                const std::string& email, optional_yield y,
+                                std::unique_ptr<User>* user) override;
+  virtual int get_user_by_swift(const DoutPrefixProvider* dpp,
+                                const std::string& user_str, optional_yield y,
+                                std::unique_ptr<User>* user) override;
+  virtual std::unique_ptr<Object> get_object(const rgw_obj_key& k) override;
+  virtual int get_bucket(const DoutPrefixProvider* dpp, User* u,
+                         const rgw_bucket& b, std::unique_ptr<Bucket>* bucket,
+                         optional_yield y) override;
+  virtual int get_bucket(User* u, const RGWBucketInfo& i,
+                         std::unique_ptr<Bucket>* bucket) override;
+  virtual int get_bucket(const DoutPrefixProvider* dpp, User* u,
+                         const std::string& tenant, const std::string& name,
+                         std::unique_ptr<Bucket>* bucket,
+                         optional_yield y) override;
+  virtual bool is_meta_master() override;
+  virtual int forward_request_to_master(const DoutPrefixProvider* dpp,
+                                        User* user, obj_version* objv,
+                                        bufferlist& in_data, JSONParser* jp,
+                                        req_info& info,
+                                        optional_yield y) override;
+  virtual int forward_iam_request_to_master(
+      const DoutPrefixProvider* dpp, const RGWAccessKey& key, obj_version* objv,
+      bufferlist& in_data, RGWXMLDecoder::XMLParser* parser, req_info& info,
+      optional_yield y) override;
+  virtual Zone* get_zone() { return &zone; }
+  virtual std::string zone_unique_id(uint64_t unique_num) override;
+  virtual std::string zone_unique_trans_id(const uint64_t unique_num) override;
+  virtual int cluster_stat(RGWClusterStat& stats) override;
+  virtual std::unique_ptr<Lifecycle> get_lifecycle(void) override;
+  virtual std::unique_ptr<Completions> get_completions(void) override;
+  virtual std::unique_ptr<Notification> get_notification(
+      rgw::sal::Object* obj, rgw::sal::Object* src_obj, struct req_state* s,
+      rgw::notify::EventType event_type,
+      const std::string* object_name = nullptr) override;
+  virtual std::unique_ptr<Notification> get_notification(
+      const DoutPrefixProvider* dpp, rgw::sal::Object* obj,
+      rgw::sal::Object* src_obj, rgw::notify::EventType event_type,
+      rgw::sal::Bucket* _bucket, std::string& _user_id,
+      std::string& _user_tenant, std::string& _req_id,
+      optional_yield y) override;
+  virtual RGWLC* get_rgwlc(void) override { return NULL; }
+  virtual RGWCoroutinesManagerRegistry* get_cr_registry() override {
+    return NULL;
+  }
+
+  virtual int log_usage(
+      const DoutPrefixProvider* dpp,
+      std::map<rgw_user_bucket, RGWUsageBatch>& usage_info) override;
+  virtual int log_op(const DoutPrefixProvider* dpp, std::string& oid,
+                     bufferlist& bl) override;
+  virtual int register_to_service_map(
+      const DoutPrefixProvider* dpp, const std::string& daemon_type,
+      const std::map<std::string, std::string>& meta) override;
+  virtual void get_quota(RGWQuota& quota) override;
+  virtual void get_ratelimit(RGWRateLimitInfo& bucket_ratelimit,
+                             RGWRateLimitInfo& user_ratelimit,
+                             RGWRateLimitInfo& anon_ratelimit) override;
+  virtual int set_buckets_enabled(const DoutPrefixProvider* dpp,
+                                  std::vector<rgw_bucket>& buckets,
+                                  bool enabled) override;
+  virtual uint64_t get_new_req_id() override {
+    return DAOS_NOT_IMPLEMENTED_LOG(nullptr);
+  }
+  virtual int get_sync_policy_handler(const DoutPrefixProvider* dpp,
+                                      std::optional<rgw_zone_id> zone,
+                                      std::optional<rgw_bucket> bucket,
+                                      RGWBucketSyncPolicyHandlerRef* phandler,
+                                      optional_yield y) override;
+  virtual RGWDataSyncStatusManager* get_data_sync_manager(
+      const rgw_zone_id& source_zone) override;
+  virtual void wakeup_meta_sync_shards(std::set<int>& shard_ids) override {
+    return;
+  }
+  virtual void wakeup_data_sync_shards(
+      const DoutPrefixProvider* dpp, const rgw_zone_id& source_zone,
+      boost::container::flat_map<
+          int, boost::container::flat_set<rgw_data_notify_entry>>& shard_ids)
+      override {
+    return;
+  }
+  virtual int clear_usage(const DoutPrefixProvider* dpp) override {
+    return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+  }
+  virtual int read_all_usage(
+      const DoutPrefixProvider* dpp, uint64_t start_epoch, uint64_t end_epoch,
+      uint32_t max_entries, bool* is_truncated, RGWUsageIter& usage_iter,
+      std::map<rgw_user_bucket, rgw_usage_log_entry>& usage) override;
+  virtual int trim_all_usage(const DoutPrefixProvider* dpp,
+                             uint64_t start_epoch, uint64_t end_epoch) override;
+  virtual int get_config_key_val(std::string name, bufferlist* bl) override;
+  virtual int meta_list_keys_init(const DoutPrefixProvider* dpp,
+                                  const std::string& section,
+                                  const std::string& marker,
+                                  void** phandle) override;
+  virtual int meta_list_keys_next(const DoutPrefixProvider* dpp, void* handle,
+                                  int max, std::list<std::string>& keys,
+                                  bool* truncated) override;
+  virtual void meta_list_keys_complete(void* handle) override;
+  virtual std::string meta_get_marker(void* handle) override;
+  virtual int meta_remove(const DoutPrefixProvider* dpp,
+                          std::string& metadata_key, optional_yield y) override;
+
+  virtual const RGWSyncModuleInstanceRef& get_sync_module() {
+    return sync_module;
+  }
+  virtual std::string get_host_id() { return ""; }
+
+  virtual std::unique_ptr<LuaManager> get_lua_manager() override;
+  virtual std::unique_ptr<RGWRole> get_role(
+      std::string name, std::string tenant, std::string path = "",
+      std::string trust_policy = "", std::string max_session_duration_str = "",
+      std::multimap<std::string, std::string> tags = {}) override;
+  virtual std::unique_ptr<RGWRole> get_role(const RGWRoleInfo& info) override;
+  virtual std::unique_ptr<RGWRole> get_role(std::string id) override;
+  virtual int get_roles(const DoutPrefixProvider* dpp, optional_yield y,
+                        const std::string& path_prefix,
+                        const std::string& tenant,
+                        std::vector<std::unique_ptr<RGWRole>>& roles) override;
+  virtual std::unique_ptr<RGWOIDCProvider> get_oidc_provider() override;
+  virtual int get_oidc_providers(
+      const DoutPrefixProvider* dpp, const std::string& tenant,
+      std::vector<std::unique_ptr<RGWOIDCProvider>>& providers) override;
+  virtual std::unique_ptr<Writer> get_append_writer(
+      const DoutPrefixProvider* dpp, optional_yield y,
+      std::unique_ptr<rgw::sal::Object> _head_obj, const rgw_user& owner,
+      const rgw_placement_rule* ptail_placement_rule,
+      const std::string& unique_tag, uint64_t position,
+      uint64_t* cur_accounted_size) override;
+  virtual std::unique_ptr<Writer> get_atomic_writer(
+      const DoutPrefixProvider* dpp, optional_yield y,
+      std::unique_ptr<rgw::sal::Object> _head_obj, const rgw_user& owner,
+      const rgw_placement_rule* ptail_placement_rule, uint64_t olh_epoch,
+      const std::string& unique_tag) override;
+  virtual const std::string& get_compression_type(
+      const rgw_placement_rule& rule) override;
+  virtual bool valid_placement(const rgw_placement_rule& rule) override;
+
+  virtual void finalize(void) override;
+
+  virtual CephContext* ctx(void) override { return cctx; }
+
+  virtual const std::string& get_luarocks_path() const override {
+    return luarocks_path;
+  }
+
+  virtual void set_luarocks_path(const std::string& path) override {
+    luarocks_path = path;
+  }
+
+  virtual int initialize(CephContext* cct,
+                         const DoutPrefixProvider* dpp) override;
+};
+
+}  // namespace rgw::sal
diff --git a/src/rgw/store/daos/README.md b/src/rgw/store/daos/README.md
new file mode 100644 (file)
index 0000000..de6d215
--- /dev/null
@@ -0,0 +1,47 @@
+# DAOS
+
+Standalone RADOS Gateway (RGW) on [DAOS](http://daos.io/) (Experimental)
+
+## CMake Option
+
+Add below cmake option
+
+```bash
+    -DWITH_RADOSGW_DAOS=ON
+```
+
+## Build
+
+```bash
+    cd build
+    ninja [vstart]
+```
+
+## Running Test cluster
+
+Edit ceph.conf to add below option
+
+```conf
+    [client]
+        rgw backend store = daos
+```
+
+Restart vstart cluster or just RGW server
+
+```bash
+    [..] RGW=1 ../src/vstart.sh -d
+```
+
+The above configuration brings up an RGW server on DAOS.
+
+## Creating a test user
+
+ To create a `testid` user to be used for s3 operations, use the following command:
+
+ ```bash
+local akey='0555b35654ad1656d804'
+local skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+    radosgw-admin user create --uid testid \
+        --access-key $akey --secret $skey \
+        --display-name 'M. Tester' --email tester@ceph.com --no-mon-config
+ ```