#include "rgw_auth_s3.h"
#include "rgw_user.h"
#include "rgw_bucket.h"
-
#include "rgw_file.h"
#include "rgw_lib_frontend.h"
+#include "common/errno.h"
#include <atomic>
op_ret = processor->complete(s->obj_size, etag, &mtime, real_time(), attrs,
(delete_at ? *delete_at : real_time()),
- if_match, if_nomatch);
+ if_match, if_nomatch);
if (op_ret != 0) {
/* revert attr updates */
rgw_fh->set_mtime(omtime);
struct rgw_statvfs *vfs_st, uint32_t flags)
{
RGWLibFS *fs = static_cast<RGWLibFS*>(rgw_fs->fs_private);
+ struct rados_cluster_stat_t stats;
+
+ RGWGetClusterStatReq req(fs->get_context(), fs->get_user(), stats);
+ int rc = rgwlib.get_fe()->execute_req(&req);
+ if (rc < 0) {
+ lderr(fs->get_context()) << "ERROR: getting total cluster usage"
+ << cpp_strerror(-rc) << dendl;
+ return rc;
+ }
- /* XXX for now, just publish a huge capacity and
- * limited utiliztion */
- vfs_st->f_bsize = 1024*1024 /* 1M */;
- vfs_st->f_frsize = 1024; /* minimal allocation unit (who cares) */
- vfs_st->f_blocks = UINT64_MAX;
- vfs_st->f_bfree = UINT64_MAX;
- vfs_st->f_bavail = UINT64_MAX;
- vfs_st->f_files = 1024; /* object count, do we have an est? */
- vfs_st->f_ffree = UINT64_MAX;
+ //Set block size to 1M.
+ constexpr uint32_t CEPH_BLOCK_SHIFT = 20;
+ vfs_st->f_bsize = 1 << CEPH_BLOCK_SHIFT;
+ vfs_st->f_frsize = 1 << CEPH_BLOCK_SHIFT;
+ vfs_st->f_blocks = stats.kb >> (CEPH_BLOCK_SHIFT - 10);
+ vfs_st->f_bfree = stats.kb_avail >> (CEPH_BLOCK_SHIFT - 10);
+ vfs_st->f_bavail = stats.kb_avail >> (CEPH_BLOCK_SHIFT - 10);
+ vfs_st->f_files = stats.num_objects;
+ vfs_st->f_ffree = -1;
vfs_st->f_fsid[0] = fs->get_fsid();
vfs_st->f_fsid[1] = fs->get_fsid();
vfs_st->f_flag = 0;
{
RGWFileHandle* rgw_fh = get_rgwfh(fh);
- /* XXX
+ /* XXX
* need to track specific opens--at least read opens and
* a write open; we need to know when a write open is returned,
* that closes a write transaction
}; /* RGWSetAttrsRequest */
+/*
+ * Send request to get the rados cluster stats
+ *
+ * Bridges the librgw file layer to the RGWGetClusterStat op: the
+ * statvfs path constructs one of these around a caller-owned
+ * rados_cluster_stat_t, the library frontend executes it, and
+ * send_response() copies the gathered stats back out.
+ */
+class RGWGetClusterStatReq : public RGWLibRequest,
+        public RGWGetClusterStat {
+public:
+  // caller-owned result struct; populated by send_response()
+  struct rados_cluster_stat_t& stats_req;
+  RGWGetClusterStatReq(CephContext* _cct,RGWUserInfo *_user,
+                       rados_cluster_stat_t& _stats):
+      RGWLibRequest(_cct, _user), stats_req(_stats){
+    op = this;
+  }
+
+  int op_init() override {
+    // assign store, s, and dialect_handler
+    RGWObjectCtx* rados_ctx
+      = static_cast<RGWObjectCtx*>(get_state()->obj_ctx);
+    // framework promises to call op_init after parent init
+    assert(rados_ctx);
+    RGWOp::init(rados_ctx->store, get_state(), this);
+    op = this; // assign self as op: REQUIRED
+    return 0;
+  }
+
+  int header_init() override {
+    // present the request to the op layer as a plain GET
+    struct req_state* s = get_state();
+    s->info.method = "GET";
+    s->op = OP_GET;
+    s->user = user;
+    return 0;
+  }
+
+  // no request parameters to parse, and no bucket context is required
+  int get_params() override { return 0; }
+  bool only_bucket() override { return false; }
+  void send_response() override {
+    // copy the stats filled in by RGWGetClusterStat::execute() out to
+    // the caller's struct
+    stats_req.kb = stats_op.kb;
+    stats_req.kb_avail = stats_op.kb_avail;
+    stats_req.kb_used = stats_op.kb_used;
+    stats_req.num_objects = stats_op.num_objects;
+  }
+}; /* RGWGetClusterStatReq */
+
+
} /* namespace rgw */
#endif /* RGW_FILE_H */
return op_ret;
});
}
+
+void RGWGetClusterStat::execute()
+{
+  // Ask the backing RADOS cluster for its aggregate usage statistics;
+  // the result lands in stats_op and the call status in op_ret.
+  op_ret = store->get_rados_handle()->cluster_stat(stats_op);
+}
+
+
virtual uint32_t op_mask() { return RGW_OP_TYPE_WRITE; }
};
+/*
+ * Abstract op that reports aggregate usage statistics for the backing
+ * RADOS cluster (total/used/available KB and object count).  Concrete
+ * subclasses supply the dialect-specific get_params()/send_response().
+ */
+class RGWGetClusterStat : public RGWOp {
+protected:
+  // Result of execute()'s cluster_stat() call.  Value-initialized so a
+  // subclass that reads it before (or after a failed) execute() sees
+  // zeros rather than indeterminate aggregate contents.
+  struct rados_cluster_stat_t stats_op{};
+public:
+  RGWGetClusterStat() {}
+
+  void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+    RGWOp::init(store, s, h);
+  }
+  // cluster-wide stats carry no per-user data; nothing to check here
+  int verify_permission() override { return 0; }
+  virtual void send_response() = 0;
+  virtual int get_params() = 0;
+  void execute() override;
+  virtual const string name() { return "get_cluster_stat"; }
+};
+
+
+
#endif /* CEPH_RGW_OP_H */