#define ldout(cct, v) dout_impl(cct, dout_subsys, v) dout_prefix
#define lderr(cct) dout_impl(cct, ceph_subsys_, -1) dout_prefix
+#define ldpp_subdout(dpp, sub, v) \
+ if (decltype(auto) pdpp = (dpp); pdpp) /* workaround -Wnonnull-compare for 'this' */ \
+ dout_impl(pdpp->get_cct(), ceph_subsys_##sub, v) \
+ pdpp->gen_prefix(*_dout)
+
#define ldpp_dout(dpp, v) \
if (decltype(auto) pdpp = (dpp); pdpp) /* workaround -Wnonnull-compare for 'this' */ \
dout_impl(pdpp->get_cct(), ceph::dout::need_dynamic(pdpp->get_subsys()), v) \
op->exec(fifo::op::CLASS, fifo::op::CREATE_META, in);
}
-int get_meta(lr::IoCtx& ioctx, const std::string& oid,
+int get_meta(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid,
std::optional<fifo::objv> objv, fifo::info* info,
std::uint32_t* part_header_size,
std::uint32_t* part_entry_overhead,
op.exec(fifo::op::CLASS, fifo::op::GET_META, in,
&bl, nullptr);
- auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, y);
+ auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y);
if (r >= 0) try {
fifo::op::get_meta_reply reply;
auto iter = bl.cbegin();
if (part_entry_overhead)
*part_entry_overhead = reply.part_entry_overhead;
} catch (const cb::error& err) {
- lderr(static_cast<CephContext*>(ioctx.cct()))
+ ldpp_dout(dpp, -1)
<< __PRETTY_FUNCTION__ << ":" << __LINE__
<< " decode failed: " << err.what()
<< " tid=" << tid << dendl;
r = from_error_code(err.code());
} else if (!(probe && (r == -ENOENT || r == -ENODATA))) {
- lderr(static_cast<CephContext*>(ioctx.cct()))
+ ldpp_dout(dpp, -1)
<< __PRETTY_FUNCTION__ << ":" << __LINE__
<< " fifo::op::GET_META failed r=" << r << " tid=" << tid
<< dendl;
op->exec(fifo::op::CLASS, fifo::op::INIT_PART, in);
}
-int push_part(lr::IoCtx& ioctx, const std::string& oid, std::string_view tag,
+int push_part(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid, std::string_view tag,
std::deque<cb::list> data_bufs, std::uint64_t tid,
optional_yield y)
{
encode(pp, in);
auto retval = 0;
op.exec(fifo::op::CLASS, fifo::op::PUSH_PART, in, nullptr, &retval);
- auto r = rgw_rados_operate(ioctx, oid, &op, y, lr::OPERATION_RETURNVEC);
+ auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y, lr::OPERATION_RETURNVEC);
if (r < 0) {
- lderr(static_cast<CephContext*>(ioctx.cct()))
+ ldpp_dout(dpp, -1)
<< __PRETTY_FUNCTION__ << ":" << __LINE__
<< " fifo::op::PUSH_PART failed r=" << r
<< " tid=" << tid << dendl;
return r;
}
if (retval < 0) {
- lderr(static_cast<CephContext*>(ioctx.cct()))
+ ldpp_dout(dpp, -1)
<< __PRETTY_FUNCTION__ << ":" << __LINE__
<< " error handling response retval=" << retval
<< " tid=" << tid << dendl;
op->exec(fifo::op::CLASS, fifo::op::TRIM_PART, in);
}
-int list_part(lr::IoCtx& ioctx, const std::string& oid,
+int list_part(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid,
std::optional<std::string_view> tag, std::uint64_t ofs,
std::uint64_t max_entries,
std::vector<fifo::part_list_entry>* entries,
encode(lp, in);
cb::list bl;
op.exec(fifo::op::CLASS, fifo::op::LIST_PART, in, &bl, nullptr);
- auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, y);
+ auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y);
if (r >= 0) try {
fifo::op::list_part_reply reply;
auto iter = bl.cbegin();
if (full_part) *full_part = reply.full_part;
if (ptag) *ptag = reply.tag;
} catch (const cb::error& err) {
- lderr(static_cast<CephContext*>(ioctx.cct()))
+ ldpp_dout(dpp, -1)
<< __PRETTY_FUNCTION__ << ":" << __LINE__
<< " decode failed: " << err.what()
<< " tid=" << tid << dendl;
r = from_error_code(err.code());
} else if (r != -ENOENT) {
- lderr(static_cast<CephContext*>(ioctx.cct()))
+ ldpp_dout(dpp, -1)
<< __PRETTY_FUNCTION__ << ":" << __LINE__
<< " fifo::op::LIST_PART failed r=" << r << " tid=" << tid
<< dendl;
return op;
}
-int get_part_info(lr::IoCtx& ioctx, const std::string& oid,
+int get_part_info(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid,
fifo::part_header* header,
std::uint64_t tid, optional_yield y)
{
cb::list bl;
encode(gpi, in);
op.exec(fifo::op::CLASS, fifo::op::GET_PART_INFO, in, &bl, nullptr);
- auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, y);
+ auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y);
if (r >= 0) try {
fifo::op::get_part_info_reply reply;
auto iter = bl.cbegin();
decode(reply, iter);
if (header) *header = std::move(reply.header);
} catch (const cb::error& err) {
- lderr(static_cast<CephContext*>(ioctx.cct()))
+ ldpp_dout(dpp, -1)
<< __PRETTY_FUNCTION__ << ":" << __LINE__
<< " decode failed: " << err.what()
<< " tid=" << tid << dendl;
r = from_error_code(err.code());
} else {
- lderr(static_cast<CephContext*>(ioctx.cct()))
+ ldpp_dout(dpp, -1)
<< __PRETTY_FUNCTION__ << ":" << __LINE__
<< " fifo::op::GET_PART_INFO failed r=" << r << " tid=" << tid
<< dendl;
return {};
}
-int FIFO::_update_meta(const fifo::update& update,
+int FIFO::_update_meta(const DoutPrefixProvider *dpp, const fifo::update& update,
fifo::objv version, bool* pcanceled,
std::uint64_t tid, optional_yield y)
{
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
lr::ObjectWriteOperation op;
bool canceled = false;
update_meta(&op, info.version, update);
- auto r = rgw_rados_operate(ioctx, oid, &op, y);
+ auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y);
if (r >= 0 || r == -ECANCELED) {
canceled = (r == -ECANCELED);
if (!canceled) {
if (r < 0) canceled = true;
}
if (canceled) {
- r = read_meta(tid, y);
+ r = read_meta(dpp, tid, y);
canceled = r < 0 ? false : true;
}
}
if (pcanceled) *pcanceled = canceled;
if (canceled) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " canceled: tid=" << tid << dendl;
}
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " returning error: r=" << r << " tid=" << tid << dendl;
}
return r;
bool reread = false;
bool* pcanceled = nullptr;
std::uint64_t tid;
- Updater(FIFO* fifo, lr::AioCompletion* super,
+ Updater(const DoutPrefixProvider *dpp, FIFO* fifo, lr::AioCompletion* super,
const fifo::update& update, fifo::objv version,
bool* pcanceled, std::uint64_t tid)
- : Completion(super), fifo(fifo), update(update), version(version),
+ : Completion(dpp, super), fifo(fifo), update(update), version(version),
pcanceled(pcanceled) {}
- void handle(Ptr&& p, int r) {
- ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) {
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
if (reread)
handle_reread(std::move(p), r);
else
- handle_update(std::move(p), r);
+ handle_update(dpp, std::move(p), r);
}
- void handle_update(Ptr&& p, int r) {
- ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ void handle_update(const DoutPrefixProvider *dpp, Ptr&& p, int r) {
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " handling async update_meta: tid="
<< tid << dendl;
if (r < 0 && r != -ECANCELED) {
- lderr(fifo->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " update failed: r=" << r << " tid=" << tid << dendl;
complete(std::move(p), r);
return;
if (!canceled) {
int r = fifo->apply_update(&fifo->info, version, update, tid);
if (r < 0) {
- ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " update failed, marking canceled: r=" << r
<< " tid=" << tid << dendl;
canceled = true;
}
if (canceled) {
reread = true;
- fifo->read_meta(tid, call(std::move(p)));
+ fifo->read_meta(dpp, tid, call(std::move(p)));
return;
}
if (pcanceled)
*pcanceled = false;
- ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " completing: tid=" << tid << dendl;
complete(std::move(p), 0);
}
}
};
-void FIFO::_update_meta(const fifo::update& update,
+void FIFO::_update_meta(const DoutPrefixProvider *dpp, const fifo::update& update,
fifo::objv version, bool* pcanceled,
std::uint64_t tid, lr::AioCompletion* c)
{
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
lr::ObjectWriteOperation op;
update_meta(&op, info.version, update);
- auto updater = std::make_unique<Updater>(this, c, update, version, pcanceled,
+ auto updater = std::make_unique<Updater>(dpp, this, c, update, version, pcanceled,
tid);
auto r = ioctx.aio_operate(oid, Updater::call(std::move(updater)), &op);
assert(r >= 0);
}
-int FIFO::create_part(int64_t part_num, std::string_view tag, std::uint64_t tid,
+int FIFO::create_part(const DoutPrefixProvider *dpp, int64_t part_num, std::string_view tag, std::uint64_t tid,
optional_yield y)
{
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
lr::ObjectWriteOperation op;
op.create(false); /* We don't need exclusivity, part_init ensures
part_init(&op, tag, info.params);
auto oid = info.part_oid(part_num);
l.unlock();
- auto r = rgw_rados_operate(ioctx, oid, &op, y);
+ auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " part_init failed: r=" << r << " tid="
<< tid << dendl;
}
return r;
}
-int FIFO::remove_part(int64_t part_num, std::string_view tag, std::uint64_t tid,
+int FIFO::remove_part(const DoutPrefixProvider *dpp, int64_t part_num, std::string_view tag, std::uint64_t tid,
optional_yield y)
{
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
lr::ObjectWriteOperation op;
op.remove();
std::unique_lock l(m);
auto oid = info.part_oid(part_num);
l.unlock();
- auto r = rgw_rados_operate(ioctx, oid, &op, y);
+ auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " remove failed: r=" << r << " tid="
<< tid << dendl;
}
return r;
}
-int FIFO::process_journal(std::uint64_t tid, optional_yield y)
+int FIFO::process_journal(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y)
{
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
std::vector<fifo::journal_entry> processed;
int r = 0;
for (auto& [n, entry] : tmpjournal) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " processing entry: entry=" << entry << " tid=" << tid
<< dendl;
switch (entry.op) {
case fifo::journal_entry::Op::create:
- r = create_part(entry.part_num, entry.part_tag, tid, y);
+ r = create_part(dpp, entry.part_num, entry.part_tag, tid, y);
if (entry.part_num > new_max) {
new_max = entry.part_num;
}
}
break;
case fifo::journal_entry::Op::remove:
- r = remove_part(entry.part_num, entry.part_tag, tid, y);
+ r = remove_part(dpp, entry.part_num, entry.part_tag, tid, y);
if (r == -ENOENT) r = 0;
if (entry.part_num >= new_tail) {
new_tail = entry.part_num + 1;
}
break;
default:
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " unknown journaled op: entry=" << entry << " tid="
<< tid << dendl;
return -EIO;
}
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " processing entry failed: entry=" << entry
<< " r=" << r << " tid=" << tid << dendl;
return -r;
bool canceled = true;
for (auto i = 0; canceled && i < MAX_RACE_RETRIES; ++i) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " postprocessing: i=" << i << " tid=" << tid << dendl;
std::optional<int64_t> tail_part_num;
if (processed.empty() &&
!tail_part_num &&
!max_part_num) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " nothing to update any more: i=" << i << " tid="
<< tid << dendl;
canceled = false;
auto u = fifo::update().tail_part_num(tail_part_num)
.head_part_num(head_part_num).max_push_part_num(max_part_num)
.journal_entries_rm(processed);
- r = _update_meta(u, objv, &canceled, tid, y);
+ r = _update_meta(dpp, u, objv, &canceled, tid, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " _update_meta failed: update=" << u
<< " r=" << r << " tid=" << tid << dendl;
break;
if (canceled) {
std::vector<fifo::journal_entry> new_processed;
std::unique_lock l(m);
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " update canceled, retrying: i=" << i << " tid="
<< tid << dendl;
for (auto& e : processed) {
}
}
if (r == 0 && canceled) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " canceled too many times, giving up: tid=" << tid << dendl;
r = -ECANCELED;
}
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " failed, r=: " << r << " tid=" << tid << dendl;
}
return r;
}
-int FIFO::_prepare_new_part(bool is_head, std::uint64_t tid, optional_yield y)
+int FIFO::_prepare_new_part(const DoutPrefixProvider *dpp, bool is_head, std::uint64_t tid, optional_yield y)
{
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
std::unique_lock l(m);
std::vector jentries = { info.next_journal_entry(generate_tag()) };
if (info.journal.find(jentries.front().part_num) != info.journal.end()) {
l.unlock();
- ldout(cct, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " new part journaled, but not processed: tid="
<< tid << dendl;
- auto r = process_journal(tid, y);
+ auto r = process_journal(dpp, tid, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " process_journal failed: r=" << r << " tid=" << tid << dendl;
}
return r;
auto version = info.version;
if (is_head) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " needs new head: tid=" << tid << dendl;
auto new_head_jentry = jentries.front();
new_head_jentry.op = fifo::journal_entry::Op::set_head;
bool canceled = true;
for (auto i = 0; canceled && i < MAX_RACE_RETRIES; ++i) {
canceled = false;
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " updating metadata: i=" << i << " tid=" << tid << dendl;
auto u = fifo::update{}.journal_entries_add(jentries);
- r = _update_meta(u, version, &canceled, tid, y);
+ r = _update_meta(dpp, u, version, &canceled, tid, y);
if (r >= 0 && canceled) {
std::unique_lock l(m);
auto found = (info.journal.find(jentries.front().part_num) !=
info.journal.end());
if ((info.max_push_part_num >= jentries.front().part_num &&
info.head_part_num >= new_head_part_num)) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " raced, but journaled and processed: i=" << i
<< " tid=" << tid << dendl;
return 0;
}
if (found) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " raced, journaled but not processed: i=" << i
<< " tid=" << tid << dendl;
canceled = false;
l.unlock();
}
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " _update_meta failed: update=" << u << " r=" << r
<< " tid=" << tid << dendl;
return r;
}
}
if (canceled) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " canceled too many times, giving up: tid=" << tid << dendl;
return -ECANCELED;
}
- r = process_journal(tid, y);
+ r = process_journal(dpp, tid, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " process_journal failed: r=" << r << " tid=" << tid << dendl;
}
return r;
}
-int FIFO::_prepare_new_head(std::uint64_t tid, optional_yield y)
+int FIFO::_prepare_new_head(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y)
{
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
std::unique_lock l(m);
std::int64_t new_head_num = info.head_part_num + 1;
int r = 0;
if (max_push_part_num < new_head_num) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " need new part: tid=" << tid << dendl;
- r = _prepare_new_part(true, tid, y);
+ r = _prepare_new_part(dpp, true, tid, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " _prepare_new_part failed: r=" << r
<< " tid=" << tid << dendl;
return r;
}
std::unique_lock l(m);
if (info.max_push_part_num < new_head_num) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " inconsistency, push part less than head part: "
<< " tid=" << tid << dendl;
return -EIO;
bool canceled = true;
for (auto i = 0; canceled && i < MAX_RACE_RETRIES; ++i) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " updating head: i=" << i << " tid=" << tid << dendl;
auto u = fifo::update{}.head_part_num(new_head_num);
- r = _update_meta(u, version, &canceled, tid, y);
+ r = _update_meta(dpp, u, version, &canceled, tid, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " _update_meta failed: update=" << u << " r=" << r
<< " tid=" << tid << dendl;
return r;
version = info.version;
l.unlock();
if (canceled && (head_part_num >= new_head_num)) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " raced, but completed by the other caller: i=" << i
<< " tid=" << tid << dendl;
canceled = false;
}
}
if (canceled) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " canceled too many times, giving up: tid=" << tid << dendl;
return -ECANCELED;
}
bool canceled = false;
uint64_t tid;
- NewPartPreparer(FIFO* f, lr::AioCompletion* super,
+ NewPartPreparer(const DoutPrefixProvider *dpp, FIFO* f, lr::AioCompletion* super,
std::vector<fifo::journal_entry> jentries,
std::int64_t new_head_part_num,
std::uint64_t tid)
- : Completion(super), f(f), jentries(std::move(jentries)),
+ : Completion(dpp, super), f(f), jentries(std::move(jentries)),
new_head_part_num(new_head_part_num), tid(tid) {}
- void handle(Ptr&& p, int r) {
- ldout(f->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) {
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
if (r < 0) {
- lderr(f->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " _update_meta failed: r=" << r
<< " tid=" << tid << dendl;
complete(std::move(p), r);
l.unlock();
if ((max_push_part_num >= jentries.front().part_num &&
head_part_num >= new_head_part_num)) {
- ldout(f->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " raced, but journaled and processed: i=" << i
<< " tid=" << tid << dendl;
complete(std::move(p), 0);
}
if (!found) {
++i;
- f->_update_meta(fifo::update{}
+ f->_update_meta(dpp, fifo::update{}
.journal_entries_add(jentries),
version, &canceled, tid, call(std::move(p)));
return;
} else {
- ldout(f->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " raced, journaled but not processed: i=" << i
<< " tid=" << tid << dendl;
canceled = false;
}
// Fall through. We still need to process the journal.
}
- f->process_journal(tid, super());
+ f->process_journal(dpp, tid, super());
return;
}
};
-void FIFO::_prepare_new_part(bool is_head, std::uint64_t tid,
+void FIFO::_prepare_new_part(const DoutPrefixProvider *dpp, bool is_head, std::uint64_t tid,
lr::AioCompletion* c)
{
std::unique_lock l(m);
std::vector jentries = { info.next_journal_entry(generate_tag()) };
if (info.journal.find(jentries.front().part_num) != info.journal.end()) {
l.unlock();
- ldout(cct, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " new part journaled, but not processed: tid="
<< tid << dendl;
- process_journal(tid, c);
+ process_journal(dpp, tid, c);
return;
}
std::int64_t new_head_part_num = info.head_part_num;
}
l.unlock();
- auto n = std::make_unique<NewPartPreparer>(this, c, jentries,
+ auto n = std::make_unique<NewPartPreparer>(dpp, this, c, jentries,
new_head_part_num, tid);
auto np = n.get();
- _update_meta(fifo::update{}.journal_entries_add(jentries), version,
+ _update_meta(dpp, fifo::update{}.journal_entries_add(jentries), version,
&np->canceled, tid, NewPartPreparer::call(std::move(n)));
}
bool canceled = false;
std::uint64_t tid;
- NewHeadPreparer(FIFO* f, lr::AioCompletion* super,
+ NewHeadPreparer(const DoutPrefixProvider *dpp, FIFO* f, lr::AioCompletion* super,
bool newpart, std::int64_t new_head_num, std::uint64_t tid)
- : Completion(super), f(f), newpart(newpart), new_head_num(new_head_num),
+ : Completion(dpp, super), f(f), newpart(newpart), new_head_num(new_head_num),
tid(tid) {}
- void handle(Ptr&& p, int r) {
+ void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) {
if (newpart)
handle_newpart(std::move(p), r);
else
- handle_update(std::move(p), r);
+ handle_update(dpp, std::move(p), r);
}
void handle_newpart(Ptr&& p, int r) {
}
}
- void handle_update(Ptr&& p, int r) {
+ void handle_update(const DoutPrefixProvider *dpp, Ptr&& p, int r) {
std::unique_lock l(f->m);
auto head_part_num = f->info.head_part_num;
auto version = f->info.version;
l.unlock();
if (r < 0) {
- lderr(f->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " _update_meta failed: r=" << r
<< " tid=" << tid << dendl;
complete(std::move(p), r);
}
if (canceled) {
if (i >= MAX_RACE_RETRIES) {
- lderr(f->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " canceled too many times, giving up: tid=" << tid << dendl;
complete(std::move(p), -ECANCELED);
return;
if (head_part_num < new_head_num) {
canceled = false;
++i;
- ldout(f->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " updating head: i=" << i << " tid=" << tid << dendl;
- f->_update_meta(fifo::update{}.head_part_num(new_head_num),
+ f->_update_meta(dpp, fifo::update{}.head_part_num(new_head_num),
version, &this->canceled, tid, call(std::move(p)));
return;
}
}
- ldout(f->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " succeeded : i=" << i << " tid=" << tid << dendl;
complete(std::move(p), 0);
return;
}
};
-void FIFO::_prepare_new_head(std::uint64_t tid, lr::AioCompletion* c)
+void FIFO::_prepare_new_head(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c)
{
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
std::unique_lock l(m);
int64_t new_head_num = info.head_part_num + 1;
l.unlock();
if (max_push_part_num < new_head_num) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " need new part: tid=" << tid << dendl;
- auto n = std::make_unique<NewHeadPreparer>(this, c, true, new_head_num,
+ auto n = std::make_unique<NewHeadPreparer>(dpp, this, c, true, new_head_num,
tid);
- _prepare_new_part(true, tid, NewHeadPreparer::call(std::move(n)));
+ _prepare_new_part(dpp, true, tid, NewHeadPreparer::call(std::move(n)));
} else {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " updating head: tid=" << tid << dendl;
- auto n = std::make_unique<NewHeadPreparer>(this, c, false, new_head_num,
+ auto n = std::make_unique<NewHeadPreparer>(dpp, this, c, false, new_head_num,
tid);
auto np = n.get();
- _update_meta(fifo::update{}.head_part_num(new_head_num), version,
+ _update_meta(dpp, fifo::update{}.head_part_num(new_head_num), version,
&np->canceled, tid, NewHeadPreparer::call(std::move(n)));
}
}
-int FIFO::push_entries(const std::deque<cb::list>& data_bufs,
+int FIFO::push_entries(const DoutPrefixProvider *dpp, const std::deque<cb::list>& data_bufs,
std::uint64_t tid, optional_yield y)
{
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
std::unique_lock l(m);
auto head_part_num = info.head_part_num;
const auto part_oid = info.part_oid(head_part_num);
l.unlock();
- auto r = push_part(ioctx, part_oid, tag, data_bufs, tid, y);
+ auto r = push_part(dpp, ioctx, part_oid, tag, data_bufs, tid, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " push_part failed: r=" << r << " tid=" << tid << dendl;
}
return r;
push_part(ioctx, part_oid, tag, data_bufs, tid, c);
}
-int FIFO::trim_part(int64_t part_num, uint64_t ofs,
+int FIFO::trim_part(const DoutPrefixProvider *dpp, int64_t part_num, uint64_t ofs,
std::optional<std::string_view> tag,
bool exclusive, std::uint64_t tid,
optional_yield y)
{
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
lr::ObjectWriteOperation op;
std::unique_lock l(m);
const auto part_oid = info.part_oid(part_num);
l.unlock();
rgw::cls::fifo::trim_part(&op, tag, ofs, exclusive);
- auto r = rgw_rados_operate(ioctx, part_oid, &op, y);
+ auto r = rgw_rados_operate(dpp, ioctx, part_oid, &op, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " trim_part failed: r=" << r << " tid=" << tid << dendl;
}
return 0;
ceph_assert(r >= 0);
}
-int FIFO::open(lr::IoCtx ioctx, std::string oid, std::unique_ptr<FIFO>* fifo,
+int FIFO::open(const DoutPrefixProvider *dpp, lr::IoCtx ioctx, std::string oid, std::unique_ptr<FIFO>* fifo,
optional_yield y, std::optional<fifo::objv> objv,
bool probe)
{
- auto cct = static_cast<CephContext*>(ioctx.cct());
- ldout(cct, 20)
+ ldpp_dout(dpp, 20)
<< __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering" << dendl;
fifo::info info;
std::uint32_t size;
std::uint32_t over;
- int r = get_meta(ioctx, std::move(oid), objv, &info, &size, &over, 0, y,
+ int r = get_meta(dpp, ioctx, std::move(oid), objv, &info, &size, &over, 0, y,
probe);
if (r < 0) {
if (!(probe && (r == -ENOENT || r == -ENODATA))) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " get_meta failed: r=" << r << dendl;
}
return r;
// If there are journal entries, process them, in case
// someone crashed mid-transaction.
if (!info.journal.empty()) {
- ldout(cct, 20)
+ ldpp_dout(dpp, 20)
<< __PRETTY_FUNCTION__ << ":" << __LINE__
<< " processing leftover journal" << dendl;
- r = f->process_journal(0, y);
+ r = f->process_journal(dpp, 0, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " process_journal failed: r=" << r << dendl;
return r;
}
return 0;
}
-int FIFO::create(lr::IoCtx ioctx, std::string oid, std::unique_ptr<FIFO>* fifo,
+int FIFO::create(const DoutPrefixProvider *dpp, lr::IoCtx ioctx, std::string oid, std::unique_ptr<FIFO>* fifo,
optional_yield y, std::optional<fifo::objv> objv,
std::optional<std::string_view> oid_prefix,
bool exclusive, std::uint64_t max_part_size,
std::uint64_t max_entry_size)
{
- auto cct = static_cast<CephContext*>(ioctx.cct());
- ldout(cct, 20)
+ ldpp_dout(dpp, 20)
<< __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering" << dendl;
lr::ObjectWriteOperation op;
create_meta(&op, oid, objv, oid_prefix, exclusive, max_part_size,
max_entry_size);
- auto r = rgw_rados_operate(ioctx, oid, &op, y);
+ auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " create_meta failed: r=" << r << dendl;
return r;
}
- r = open(std::move(ioctx), std::move(oid), fifo, y, objv);
+ r = open(dpp, std::move(ioctx), std::move(oid), fifo, y, objv);
return r;
}
-int FIFO::read_meta(std::uint64_t tid, optional_yield y) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+int FIFO::read_meta(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y) {
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
fifo::info _info;
std::uint32_t _phs;
std::uint32_t _peo;
- auto r = get_meta(ioctx, oid, nullopt, &_info, &_phs, &_peo, tid, y);
+ auto r = get_meta(dpp, ioctx, oid, nullopt, &_info, &_phs, &_peo, tid, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " get_meta failed: r=" << r << " tid=" << tid << dendl;
return r;
}
return 0;
}
-int FIFO::read_meta(optional_yield y) {
+int FIFO::read_meta(const DoutPrefixProvider *dpp, optional_yield y) {
std::unique_lock l(m);
auto tid = ++next_tid;
l.unlock();
- return read_meta(tid, y);
+ return read_meta(dpp, tid, y);
}
struct Reader : public Completion<Reader> {
FIFO* fifo;
cb::list bl;
std::uint64_t tid;
- Reader(FIFO* fifo, lr::AioCompletion* super, std::uint64_t tid)
- : Completion(super), fifo(fifo), tid(tid) {}
+ Reader(const DoutPrefixProvider *dpp, FIFO* fifo, lr::AioCompletion* super, std::uint64_t tid)
+ : Completion(dpp, super), fifo(fifo), tid(tid) {}
- void handle(Ptr&& p, int r) {
- auto cct = fifo->cct;
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) {
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
if (r >= 0) try {
fifo::op::get_meta_reply reply;
fifo->part_entry_overhead = reply.part_entry_overhead;
}
} catch (const cb::error& err) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " failed to decode response err=" << err.what()
<< " tid=" << tid << dendl;
r = from_error_code(err.code());
} else {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " read_meta failed r=" << r
<< " tid=" << tid << dendl;
}
}
};
-void FIFO::read_meta(std::uint64_t tid, lr::AioCompletion* c)
+void FIFO::read_meta(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c)
{
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
lr::ObjectReadOperation op;
fifo::op::get_meta gm;
cb::list in;
encode(gm, in);
- auto reader = std::make_unique<Reader>(this, c, tid);
+ auto reader = std::make_unique<Reader>(dpp, this, c, tid);
auto rp = reader.get();
auto r = ioctx.aio_exec(oid, Reader::call(std::move(reader)), fifo::op::CLASS,
fifo::op::GET_META, in, &rp->bl);
return {part_header_size, part_entry_overhead};
}
-int FIFO::push(const cb::list& bl, optional_yield y) {
- return push(std::vector{ bl }, y);
+// Push a single entry, blocking/yielding variant. Thin convenience wrapper
+// that wraps the buffer in a one-element vector and delegates to the
+// multi-entry overload.
+int FIFO::push(const DoutPrefixProvider *dpp, const cb::list& bl, optional_yield y) {
+ return push(dpp, std::vector{ bl }, y);
}
-void FIFO::push(const cb::list& bl, lr::AioCompletion* c) {
- push(std::vector{ bl }, c);
+// Push a single entry, asynchronous (AioCompletion) variant. Thin wrapper
+// delegating to the multi-entry async overload.
+void FIFO::push(const DoutPrefixProvider *dpp, const cb::list& bl, lr::AioCompletion* c) {
+ push(dpp, std::vector{ bl }, c);
}
-int FIFO::push(const std::vector<cb::list>& data_bufs, optional_yield y)
+int FIFO::push(const DoutPrefixProvider *dpp, const std::vector<cb::list>& data_bufs, optional_yield y)
{
std::unique_lock l(m);
auto tid = ++next_tid;
auto max_entry_size = info.params.max_entry_size;
auto need_new_head = info.need_new_head();
l.unlock();
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
if (data_bufs.empty()) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " empty push, returning success tid=" << tid << dendl;
return 0;
}
// Validate sizes
for (const auto& bl : data_bufs) {
if (bl.length() > max_entry_size) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entry bigger than max_entry_size tid=" << tid << dendl;
return -E2BIG;
}
int r = 0;
if (need_new_head) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " need new head tid=" << tid << dendl;
- r = _prepare_new_head(tid, y);
+ r = _prepare_new_head(dpp, tid, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " _prepare_new_head failed: r=" << r
<< " tid=" << tid << dendl;
return r;
bool canceled = true;
while ((!remaining.empty() || !batch.empty()) &&
(retries <= MAX_RACE_RETRIES)) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " preparing push: remaining=" << remaining.size()
<< " batch=" << batch.size() << " retries=" << retries
<< " tid=" << tid << dendl;
batch.push_back(std::move(remaining.front()));
remaining.pop_front();
}
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " prepared push: remaining=" << remaining.size()
<< " batch=" << batch.size() << " retries=" << retries
<< " batch_len=" << batch_len
<< " tid=" << tid << dendl;
- auto r = push_entries(batch, tid, y);
+ auto r = push_entries(dpp, batch, tid, y);
if (r == -ERANGE) {
canceled = true;
++retries;
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " need new head tid=" << tid << dendl;
- r = _prepare_new_head(tid, y);
+ r = _prepare_new_head(dpp, tid, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " prepare_new_head failed: r=" << r
<< " tid=" << tid << dendl;
return r;
continue;
}
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " push_entries failed: r=" << r
<< " tid=" << tid << dendl;
return r;
}
}
if (canceled) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " canceled too many times, giving up: tid=" << tid << dendl;
return -ECANCELED;
}
f->push_entries(batch, tid, call(std::move(p)));
}
- void new_head(Ptr&& p) {
+ void new_head(const DoutPrefixProvider *dpp, Ptr&& p) {
new_heading = true;
- f->_prepare_new_head(tid, call(std::move(p)));
+ f->_prepare_new_head(dpp, tid, call(std::move(p)));
}
- void handle(Ptr&& p, int r) {
+ void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) {
if (!new_heading) {
if (r == -ERANGE) {
- ldout(f->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " need new head tid=" << tid << dendl;
- new_head(std::move(p));
+ new_head(dpp, std::move(p));
return;
}
if (r < 0) {
- lderr(f->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " push_entries failed: r=" << r
<< " tid=" << tid << dendl;
complete(std::move(p), r);
prep_then_push(std::move(p), r);
} else {
if (r < 0) {
- lderr(f->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " prepare_new_head failed: r=" << r
<< " tid=" << tid << dendl;
complete(std::move(p), r);
}
}
- Pusher(FIFO* f, std::deque<cb::list>&& remaining,
+ Pusher(const DoutPrefixProvider *dpp, FIFO* f, std::deque<cb::list>&& remaining,
std::uint64_t tid, lr::AioCompletion* super)
- : Completion(super), f(f), remaining(std::move(remaining)),
+ : Completion(dpp, super), f(f), remaining(std::move(remaining)),
tid(tid) {}
};
-void FIFO::push(const std::vector<cb::list>& data_bufs,
+void FIFO::push(const DoutPrefixProvider *dpp, const std::vector<cb::list>& data_bufs,
lr::AioCompletion* c)
{
std::unique_lock l(m);
auto max_entry_size = info.params.max_entry_size;
auto need_new_head = info.need_new_head();
l.unlock();
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
- auto p = std::make_unique<Pusher>(this, std::deque<cb::list>(data_bufs.begin(), data_bufs.end()),
+ auto p = std::make_unique<Pusher>(dpp, this, std::deque<cb::list>(data_bufs.begin(), data_bufs.end()),
tid, c);
// Validate sizes
for (const auto& bl : data_bufs) {
if (bl.length() > max_entry_size) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entry bigger than max_entry_size tid=" << tid << dendl;
Pusher::complete(std::move(p), -E2BIG);
return;
}
if (data_bufs.empty() ) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " empty push, returning success tid=" << tid << dendl;
Pusher::complete(std::move(p), 0);
return;
}
if (need_new_head) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " need new head tid=" << tid << dendl;
- p->new_head(std::move(p));
+ p->new_head(dpp, std::move(p));
} else {
p->prep_then_push(std::move(p), 0);
}
}
-int FIFO::list(int max_entries,
+int FIFO::list(const DoutPrefixProvider *dpp, int max_entries,
std::optional<std::string_view> markstr,
std::vector<list_entry>* presult, bool* pmore,
optional_yield y)
auto tid = ++next_tid;
std::int64_t part_num = info.tail_part_num;
l.unlock();
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
std::uint64_t ofs = 0;
if (markstr) {
auto marker = to_marker(*markstr);
if (!marker) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " invalid marker string: " << markstr
<< " tid= "<< tid << dendl;
return -EINVAL;
std::vector<fifo::part_list_entry> entries;
int r = 0;
while (max_entries > 0) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " max_entries=" << max_entries << " tid=" << tid << dendl;
bool part_more = false;
bool part_full = false;
auto part_oid = info.part_oid(part_num);
l.unlock();
- r = list_part(ioctx, part_oid, {}, ofs, max_entries, &entries,
+ r = list_part(dpp, ioctx, part_oid, {}, ofs, max_entries, &entries,
&part_more, &part_full, nullptr, tid, y);
if (r == -ENOENT) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " missing part, rereading metadata"
<< " tid= "<< tid << dendl;
- r = read_meta(tid, y);
+ r = read_meta(dpp, tid, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " read_meta failed: r=" << r
<< " tid= "<< tid << dendl;
return r;
}
if (part_num < info.tail_part_num) {
/* raced with trim? restart */
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " raced with trim, restarting: tid=" << tid << dendl;
max_entries += result.size();
result.clear();
ofs = 0;
continue;
}
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " assuming part was not written yet, so end of data: "
<< "tid=" << tid << dendl;
more = false;
break;
}
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " list_entries failed: r=" << r
<< " tid= "<< tid << dendl;
return r;
}
if (!part_full) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " head part is not full, so we can assume we're done: "
<< "tid=" << tid << dendl;
break;
return 0;
}
-int FIFO::trim(std::string_view markstr, bool exclusive, optional_yield y)
+int FIFO::trim(const DoutPrefixProvider *dpp, std::string_view markstr, bool exclusive, optional_yield y)
{
bool overshoot = false;
auto marker = to_marker(markstr);
const auto max_part_size = info.params.max_part_size;
if (part_num > hn) {
l.unlock();
- auto r = read_meta(tid, y);
+ auto r = read_meta(dpp, tid, y);
if (r < 0) {
return r;
}
}
auto pn = info.tail_part_num;
l.unlock();
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
int r = 0;
+// Trim (and implicitly retire) every whole part strictly before the target
+// part, walking pn up from the tail.
while (pn < part_num) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " pn=" << pn << " tid=" << tid << dendl;
std::unique_lock l(m);
l.unlock();
- r = trim_part(pn, max_part_size, std::nullopt, false, tid, y);
+ r = trim_part(dpp, pn, max_part_size, std::nullopt, false, tid, y);
+// NOTE(review): this condition looks inverted — 'r < 0 && r == -ENOENT'
+// reports ONLY the ENOENT case, whereas the analogous check after the loop
+// uses 'r < 0 && r != -ENOENT' (ENOENT meaning the part is already gone and
+// is benign). Likely should be 'r != -ENOENT' here too — confirm upstream.
if (r < 0 && r == -ENOENT) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " trim_part failed: r=" << r
<< " tid= "<< tid << dendl;
return r;
}
++pn;
}
- r = trim_part(part_num, ofs, std::nullopt, exclusive, tid, y);
+ r = trim_part(dpp, part_num, ofs, std::nullopt, exclusive, tid, y);
if (r < 0 && r != -ENOENT) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " trim_part failed: r=" << r
<< " tid= "<< tid << dendl;
return r;
while ((tail_part_num < part_num) &&
canceled &&
(retries <= MAX_RACE_RETRIES)) {
- r = _update_meta(fifo::update{}.tail_part_num(part_num), objv, &canceled,
+ r = _update_meta(dpp, fifo::update{}.tail_part_num(part_num), objv, &canceled,
tid, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " _update_meta failed: r=" << r
<< " tid= "<< tid << dendl;
return r;
}
if (canceled) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " canceled: retries=" << retries
<< " tid=" << tid << dendl;
l.lock();
}
}
if (canceled) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " canceled too many times, giving up: tid=" << tid << dendl;
return -EIO;
}
bool overshoot = false;
int retries = 0;
- Trimmer(FIFO* fifo, std::int64_t part_num, std::uint64_t ofs, std::int64_t pn,
+ Trimmer(const DoutPrefixProvider *dpp, FIFO* fifo, std::int64_t part_num, std::uint64_t ofs, std::int64_t pn,
bool exclusive, lr::AioCompletion* super, std::uint64_t tid)
- : Completion(super), fifo(fifo), part_num(part_num), ofs(ofs), pn(pn),
+ : Completion(dpp, super), fifo(fifo), part_num(part_num), ofs(ofs), pn(pn),
exclusive(exclusive), tid(tid) {}
- void handle(Ptr&& p, int r) {
- auto cct = fifo->cct;
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) {
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
if (reread) {
reread = false;
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " read_meta failed: r="
<< r << " tid=" << tid << dendl;
complete(std::move(p), r);
}
pn = tail_part_num;
if (pn < part_num) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " pn=" << pn << " tid=" << tid << dendl;
fifo->trim_part(pn++, max_part_size, std::nullopt,
false, tid, call(std::move(p)));
}
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< (update ? " update_meta " : " trim ") << "failed: r="
<< r << " tid=" << tid << dendl;
complete(std::move(p), r);
}
if (!update) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " handling preceding trim callback: tid=" << tid << dendl;
retries = 0;
if (pn < part_num) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " pn=" << pn << " tid=" << tid << dendl;
std::unique_lock l(fifo->m);
const auto max_part_size = fifo->info.params.max_part_size;
return;
}
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " handling update-needed callback: tid=" << tid << dendl;
std::unique_lock l(fifo->m);
auto tail_part_num = fifo->info.tail_part_num;
if ((tail_part_num < part_num) &&
canceled) {
if (retries > MAX_RACE_RETRIES) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " canceled too many times, giving up: tid=" << tid << dendl;
complete(std::move(p), -EIO);
return;
}
++retries;
- fifo->_update_meta(fifo::update{}
+ fifo->_update_meta(dpp, fifo::update{}
.tail_part_num(part_num), objv, &canceled,
tid, call(std::move(p)));
} else {
}
};
-void FIFO::trim(std::string_view markstr, bool exclusive,
+void FIFO::trim(const DoutPrefixProvider *dpp, std::string_view markstr, bool exclusive,
lr::AioCompletion* c) {
auto marker = to_marker(markstr);
auto realmark = marker.value_or(::rgw::cls::fifo::marker{});
const auto part_oid = info.part_oid(pn);
auto tid = ++next_tid;
l.unlock();
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
- auto trimmer = std::make_unique<Trimmer>(this, realmark.num, realmark.ofs,
+ auto trimmer = std::make_unique<Trimmer>(dpp, this, realmark.num, realmark.ofs,
pn, exclusive, c, tid);
if (!marker) {
Trimmer::complete(std::move(trimmer), -EINVAL);
auto ofs = marker->ofs;
if (marker->num > hn) {
trimmer->reread = true;
- read_meta(tid, Trimmer::call(std::move(trimmer)));
+ read_meta(dpp, tid, Trimmer::call(std::move(trimmer)));
return;
}
if (pn < marker->num) {
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " pn=" << pn << " tid=" << tid << dendl;
ofs = max_part_size;
} else {
tid, Trimmer::call(std::move(trimmer)));
}
-int FIFO::get_part_info(int64_t part_num,
+int FIFO::get_part_info(const DoutPrefixProvider *dpp, int64_t part_num,
fifo::part_header* header,
optional_yield y)
{
const auto part_oid = info.part_oid(part_num);
auto tid = ++next_tid;
l.unlock();
- auto r = rgw::cls::fifo::get_part_info(ioctx, part_oid, header, tid, y);
+ auto r = rgw::cls::fifo::get_part_info(dpp, ioctx, part_oid, header, tid, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " get_part_info failed: r="
<< r << " tid=" << tid << dendl;
}
std::uint64_t tid;
bool headerread = false;
- InfoGetter(FIFO* fifo, fu2::function<void(int r, fifo::part_header&&)> f,
+ InfoGetter(const DoutPrefixProvider *dpp, FIFO* fifo, fu2::function<void(int r, fifo::part_header&&)> f,
std::uint64_t tid, lr::AioCompletion* super)
- : Completion(super), fifo(fifo), f(std::move(f)), tid(tid) {}
- void handle(Ptr&& p, int r) {
+ : Completion(dpp, super), fifo(fifo), f(std::move(f)), tid(tid) {}
+ void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) {
if (!headerread) {
if (r < 0) {
- lderr(fifo->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " read_meta failed: r="
<< r << " tid=" << tid << dendl;
if (f)
auto info = fifo->meta();
auto hpn = info.head_part_num;
if (hpn < 0) {
- ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " no head, returning empty partinfo r="
<< r << " tid=" << tid << dendl;
if (f)
}
if (r < 0) {
- lderr(fifo->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " get_part_info failed: r="
<< r << " tid=" << tid << dendl;
}
}
};
+// Asynchronously fetch the part header of the FIFO's head part. Refreshes
+// the metadata first via read_meta; the InfoGetter completion then invokes
+// the supplied callback with the result (see InfoGetter::handle above).
-void FIFO::get_head_info(fu2::unique_function<void(int r,
+void FIFO::get_head_info(const DoutPrefixProvider *dpp, fu2::unique_function<void(int r,
fifo::part_header&&)> f,
lr::AioCompletion* c)
{
std::unique_lock l(m);
auto tid = ++next_tid;
l.unlock();
- auto ig = std::make_unique<InfoGetter>(this, std::move(f), tid, c);
- read_meta(tid, InfoGetter::call(std::move(ig)));
+ auto ig = std::make_unique<InfoGetter>(dpp, this, std::move(f), tid, c);
+ read_meta(dpp, tid, InfoGetter::call(std::move(ig)));
}
struct JournalProcessor : public Completion<JournalProcessor> {
return;
}
- void finish_je(Ptr&& p, int r,
+ void finish_je(const DoutPrefixProvider *dpp, Ptr&& p, int r,
const fifo::journal_entry& entry) {
- ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
- ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " finishing entry: entry=" << entry
<< " tid=" << tid << dendl;
r = 0;
if (r < 0) {
- lderr(fifo->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " processing entry failed: entry=" << entry
<< " r=" << r << " tid=" << tid << dendl;
complete(std::move(p), r);
processed.push_back(entry);
}
++iter;
- process(std::move(p));
+ process(dpp, std::move(p));
}
- void postprocess(Ptr&& p) {
- ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ void postprocess(const DoutPrefixProvider *dpp, Ptr&& p) {
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
if (processed.empty()) {
- ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " nothing to update any more: race_retries="
<< race_retries << " tid=" << tid << dendl;
complete(std::move(p), 0);
return;
}
- pp_run(std::move(p), 0, false);
+ pp_run(dpp, std::move(p), 0, false);
}
public:
- JournalProcessor(FIFO* fifo, std::uint64_t tid, lr::AioCompletion* super)
- : Completion(super), fifo(fifo), tid(tid) {
+ JournalProcessor(const DoutPrefixProvider *dpp, FIFO* fifo, std::uint64_t tid, lr::AioCompletion* super)
+ : Completion(dpp, super), fifo(fifo), tid(tid) {
std::unique_lock l(fifo->m);
journal = fifo->info.journal;
iter = journal.begin();
new_max = fifo->info.max_push_part_num;
}
- void pp_run(Ptr&& p, int r, bool canceled) {
- ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ void pp_run(const DoutPrefixProvider *dpp, Ptr&& p, int r, bool canceled) {
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
std::optional<int64_t> tail_part_num;
std::optional<int64_t> head_part_num;
std::optional<int64_t> max_part_num;
if (r < 0) {
- lderr(fifo->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " failed, r=: " << r << " tid=" << tid << dendl;
complete(std::move(p), r);
}
- ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " postprocessing: race_retries="
<< race_retries << " tid=" << tid << dendl;
if (!first_pp && r == 0 && !canceled) {
- ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " nothing to update any more: race_retries="
<< race_retries << " tid=" << tid << dendl;
complete(std::move(p), 0);
if (canceled) {
if (race_retries >= MAX_RACE_RETRIES) {
- lderr(fifo->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " canceled too many times, giving up: tid="
<< tid << dendl;
complete(std::move(p), -ECANCELED);
return;
}
- ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " update canceled, retrying: race_retries="
<< race_retries << " tid=" << tid << dendl;
!tail_part_num &&
!max_part_num) {
/* nothing to update anymore */
- ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " nothing to update any more: race_retries="
<< race_retries << " tid=" << tid << dendl;
complete(std::move(p), 0);
return;
}
state = pp_callback;
- fifo->_update_meta(fifo::update{}
+ fifo->_update_meta(dpp, fifo::update{}
.tail_part_num(tail_part_num)
.head_part_num(head_part_num)
.max_push_part_num(max_part_num)
JournalProcessor(JournalProcessor&&) = delete;
JournalProcessor& operator =(JournalProcessor&&) = delete;
- void process(Ptr&& p) {
- ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ void process(const DoutPrefixProvider *dpp, Ptr&& p) {
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
while (iter != journal.end()) {
- ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " processing entry: entry=" << *iter
<< " tid=" << tid << dendl;
const auto entry = iter->second;
return;
}
}
- postprocess(std::move(p));
+ postprocess(dpp, std::move(p));
return;
}
- void handle(Ptr&& p, int r) {
- ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) {
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " entering: tid=" << tid << dendl;
switch (state) {
case entry_callback:
- finish_je(std::move(p), r, iter->second);
+ finish_je(dpp, std::move(p), r, iter->second);
return;
case pp_callback:
auto c = canceled;
canceled = false;
- pp_run(std::move(p), r, c);
+ pp_run(dpp, std::move(p), r, c);
return;
}
};
-void FIFO::process_journal(std::uint64_t tid, lr::AioCompletion* c) {
- auto p = std::make_unique<JournalProcessor>(this, tid, c);
- p->process(std::move(p));
+// Asynchronously replay the FIFO's journal entries. Ownership of the
+// JournalProcessor completion is handed to its own process() loop, which
+// completes 'c' when the journal has been fully applied or on error.
+void FIFO::process_journal(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c) {
+ auto p = std::make_unique<JournalProcessor>(dpp, this, tid, c);
+ p->process(dpp, std::move(p));
}
struct Lister : Completion<Lister> {
}
public:
- Lister(FIFO* f, std::int64_t part_num, std::uint64_t ofs, int max_entries,
+ Lister(const DoutPrefixProvider *dpp, FIFO* f, std::int64_t part_num, std::uint64_t ofs, int max_entries,
std::vector<list_entry>* entries_out, bool* more_out,
std::uint64_t tid, lr::AioCompletion* super)
- : Completion(super), f(f), part_num(part_num), ofs(ofs), max_entries(max_entries),
+ : Completion(dpp, super), f(f), part_num(part_num), ofs(ofs), max_entries(max_entries),
entries_out(entries_out), more_out(more_out), tid(tid) {
result.reserve(max_entries);
}
Lister(Lister&&) = delete;
Lister& operator =(Lister&&) = delete;
- void handle(Ptr&& p, int r) {
+ void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) {
if (read)
handle_read(std::move(p), r);
else
- handle_list(std::move(p), r);
+ handle_list(dpp, std::move(p), r);
}
void list(Ptr&& p) {
return;
}
- void handle_list(Ptr&& p, int r) {
+ void handle_list(const DoutPrefixProvider *dpp, Ptr&& p, int r) {
if (r >= 0) r = r_out;
r_out = 0;
std::unique_lock l(f->m);
l.unlock();
if (r == -ENOENT) {
read = true;
- f->read_meta(tid, call(std::move(p)));
+ f->read_meta(dpp, tid, call(std::move(p)));
return;
}
if (r < 0) {
}
};
-void FIFO::list(int max_entries,
+void FIFO::list(const DoutPrefixProvider *dpp, int max_entries,
std::optional<std::string_view> markstr,
std::vector<list_entry>* out,
bool* more,
}
}
- auto ls = std::make_unique<Lister>(this, part_num, ofs, max_entries, out,
+ auto ls = std::make_unique<Lister>(dpp, this, part_num, ofs, max_entries, out,
more, tid, c);
if (markstr && !marker) {
auto l = ls.get();
bool exclusive = false,
std::uint64_t max_part_size = default_max_part_size,
std::uint64_t max_entry_size = default_max_entry_size);
-int get_meta(lr::IoCtx& ioctx, const std::string& oid,
+int get_meta(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid,
std::optional<fifo::objv> objv, fifo::info* info,
std::uint32_t* part_header_size,
std::uint32_t* part_entry_overhead,
std::uint64_t tid, optional_yield y,
bool probe = false);
-
struct marker {
std::int64_t num = 0;
std::uint64_t ofs = 0;
const fifo::objv& objv,
const fifo::update& update,
std::uint64_t tid);
- int _update_meta(const fifo::update& update,
+ int _update_meta(const DoutPrefixProvider *dpp, const fifo::update& update,
fifo::objv version, bool* pcanceled,
std::uint64_t tid, optional_yield y);
- void _update_meta(const fifo::update& update,
+ void _update_meta(const DoutPrefixProvider *dpp, const fifo::update& update,
fifo::objv version, bool* pcanceled,
std::uint64_t tid, lr::AioCompletion* c);
- int create_part(int64_t part_num, std::string_view tag, std::uint64_t tid,
+ int create_part(const DoutPrefixProvider *dpp, int64_t part_num, std::string_view tag, std::uint64_t tid,
optional_yield y);
- int remove_part(int64_t part_num, std::string_view tag, std::uint64_t tid,
+ int remove_part(const DoutPrefixProvider *dpp, int64_t part_num, std::string_view tag, std::uint64_t tid,
optional_yield y);
- int process_journal(std::uint64_t tid, optional_yield y);
- void process_journal(std::uint64_t tid, lr::AioCompletion* c);
- int _prepare_new_part(bool is_head, std::uint64_t tid, optional_yield y);
- void _prepare_new_part(bool is_head, std::uint64_t tid, lr::AioCompletion* c);
- int _prepare_new_head(std::uint64_t tid, optional_yield y);
- void _prepare_new_head(std::uint64_t tid, lr::AioCompletion* c);
- int push_entries(const std::deque<cb::list>& data_bufs,
+ int process_journal(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y);
+ void process_journal(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c);
+ int _prepare_new_part(const DoutPrefixProvider *dpp, bool is_head, std::uint64_t tid, optional_yield y);
+ void _prepare_new_part(const DoutPrefixProvider *dpp, bool is_head, std::uint64_t tid, lr::AioCompletion* c);
+ int _prepare_new_head(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y);
+ void _prepare_new_head(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c);
+ int push_entries(const DoutPrefixProvider *dpp, const std::deque<cb::list>& data_bufs,
std::uint64_t tid, optional_yield y);
void push_entries(const std::deque<cb::list>& data_bufs,
std::uint64_t tid, lr::AioCompletion* c);
- int trim_part(int64_t part_num, uint64_t ofs,
+ int trim_part(const DoutPrefixProvider *dpp, int64_t part_num, uint64_t ofs,
std::optional<std::string_view> tag, bool exclusive,
std::uint64_t tid, optional_yield y);
void trim_part(int64_t part_num, uint64_t ofs,
std::uint64_t tid, lr::AioCompletion* c);
/// Force refresh of metadata, yielding/blocking style
- int read_meta(std::uint64_t tid, optional_yield y);
+ int read_meta(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y);
/// Force refresh of metadata, with a librados Completion
- void read_meta(std::uint64_t tid, lr::AioCompletion* c);
+ void read_meta(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c);
public:
FIFO& operator =(FIFO&&) = delete;
/// Open an existing FIFO.
- static int open(lr::IoCtx ioctx, //< IO Context
+ static int open(const DoutPrefixProvider *dpp, lr::IoCtx ioctx, //< IO Context
std::string oid, //< OID for metadata object
std::unique_ptr<FIFO>* fifo, //< OUT: Pointer to FIFO object
optional_yield y, //< Optional yield context
/// can't find it.
bool probe = false);
/// Create a new or open an existing FIFO.
- static int create(lr::IoCtx ioctx, //< IO Context
+ static int create(const DoutPrefixProvider *dpp, lr::IoCtx ioctx, //< IO Context
std::string oid, //< OID for metadata object
std::unique_ptr<FIFO>* fifo, //< OUT: Pointer to FIFO object
optional_yield y, //< Optional yield context
std::uint64_t max_entry_size = default_max_entry_size);
/// Force refresh of metadata, yielding/blocking style
- int read_meta(optional_yield y);
+ int read_meta(const DoutPrefixProvider *dpp, optional_yield y);
/// Get currently known metadata
const fifo::info& meta() const;
/// Get partition header and entry overhead size
std::pair<std::uint32_t, std::uint32_t> get_part_layout_info() const;
/// Push an entry to the FIFO
- int push(const cb::list& bl, //< Entry to push
+ int push(const DoutPrefixProvider *dpp,
+ const cb::list& bl, //< Entry to push
optional_yield y //< Optional yield
);
/// Push an entry to the FIFO
- void push(const cb::list& bl, //< Entry to push
+ void push(const DoutPrefixProvider *dpp, const cb::list& bl, //< Entry to push
lr::AioCompletion* c //< Async Completion
);
/// Push entries to the FIFO
- int push(const std::vector<cb::list>& data_bufs, //< Entries to push
+ int push(const DoutPrefixProvider *dpp,
+ const std::vector<cb::list>& data_bufs, //< Entries to push
optional_yield y //< Optional yield
);
/// Push entries to the FIFO
- void push(const std::vector<cb::list>& data_bufs, //< Entries to push
+ void push(const DoutPrefixProvider *dpp, const std::vector<cb::list>& data_bufs, //< Entries to push
lr::AioCompletion* c //< Async Completion
);
/// List entries
- int list(int max_entries, //< Maximum entries to list
+ int list(const DoutPrefixProvider *dpp,
+ int max_entries, //< Maximum entries to list
/// Point after which to begin listing. Start at tail if null
std::optional<std::string_view> markstr,
std::vector<list_entry>* out, //< OUT: entries
bool* more,
optional_yield y //< Optional yield
);
- void list(int max_entries, //< Maximum entries to list
+ void list(const DoutPrefixProvider *dpp,
+ int max_entries, //< Maximum entries to list
/// Point after which to begin listing. Start at tail if null
std::optional<std::string_view> markstr,
std::vector<list_entry>* out, //< OUT: entries
lr::AioCompletion* c //< Async Completion
);
/// Trim entries, coroutine/block style
- int trim(std::string_view markstr, //< Position to which to trim, inclusive
+ int trim(const DoutPrefixProvider *dpp,
+ std::string_view markstr, //< Position to which to trim, inclusive
bool exclusive, //< If true, do not trim the target entry
//< itself, just all those before it.
optional_yield y //< Optional yield
);
/// Trim entries, librados AioCompletion style
- void trim(std::string_view markstr, //< Position to which to trim, inclusive
+ void trim(const DoutPrefixProvider *dpp,
+ std::string_view markstr, //< Position to which to trim, inclusive
bool exclusive, //< If true, do not trim the target entry
//< itself, just all those before it.
lr::AioCompletion* c //< librados AIO Completion
);
/// Get part info
- int get_part_info(int64_t part_num, /// Part number
+ int get_part_info(const DoutPrefixProvider *dpp, int64_t part_num, /// Part number
fifo::part_header* header, //< OUT: Information
optional_yield y //< Optional yield
);
/// A convenience method to fetch the part information for the FIFO
/// head, using librados::AioCompletion, since
/// libradio::AioCompletions compose lousily.
- void get_head_info(fu2::unique_function< //< Function to receive info
+ void get_head_info(const DoutPrefixProvider *dpp, fu2::unique_function< //< Function to receive info
void(int r, fifo::part_header&&)>,
lr::AioCompletion* c //< AIO Completion
);
template<typename T>
struct Completion {
private:
+ const DoutPrefixProvider *_dpp;
lr::AioCompletion* _cur = nullptr;
lr::AioCompletion* _super;
public:
return _super;
}
- Completion(lr::AioCompletion* super) : _super(super) {
+ Completion(const DoutPrefixProvider *dpp, lr::AioCompletion* super) : _dpp(dpp), _super(super) {
super->pc->get();
}
auto r = t->_cur->get_return_value();
t->_cur->release();
t->_cur = nullptr;
- t->handle(Ptr(t), r);
+ t->handle(t->_dpp, Ptr(t), r);
}
};
#include <string>
#include <mutex>
-
#define dout_subsys ceph_subsys_rgw
bool global_stop = false;
}
}
- void RGWLibProcess::handle_request(RGWRequest* r)
+ void RGWLibProcess::handle_request(const DoutPrefixProvider *dpp, RGWRequest* r)
{
/*
* invariant: valid requests are derived from RGWLibRequst
g_conf()->rgw_run_sync_thread &&
g_conf()->rgw_nfs_run_sync_thread;
- const DoutPrefix dp(cct.get(), dout_subsys, "librgw: ");
- store = StoreManager::get_storage(&dp, g_ceph_context,
+ store = StoreManager::get_storage(this, g_ceph_context,
"rados",
run_gc,
run_lc,
int RGWLibRequest::read_permissions(RGWOp* op, optional_yield y) {
/* bucket and object ops */
- const DoutPrefix dp(store->ctx(), dout_subsys, "librgw: ");
int ret =
- rgw_build_bucket_policies(&dp, rgwlib.get_store(), get_state(), y);
+ rgw_build_bucket_policies(op, rgwlib.get_store(), get_state(), y);
if (ret < 0) {
- ldpp_dout(&dp, 10) << "read_permissions (bucket policy) on "
+ ldpp_dout(op, 10) << "read_permissions (bucket policy) on "
<< get_state()->bucket << ":"
<< get_state()->object
<< " only_bucket=" << only_bucket()
ret = -EACCES;
} else if (! only_bucket()) {
/* object ops */
- ret = rgw_build_object_policies(&dp, rgwlib.get_store(), get_state(),
+ ret = rgw_build_object_policies(op, rgwlib.get_store(), get_state(),
op->prefetch_data(), y);
if (ret < 0) {
- ldpp_dout(&dp, 10) << "read_permissions (object policy) on"
+ ldpp_dout(op, 10) << "read_permissions (object policy) on"
<< get_state()->bucket << ":"
<< get_state()->object
<< " ret=" << ret << dendl;
return perm_mask & auth_identity.get_perms_from_aclspec(dpp, acl_user_map);
}
+// Return the permission bits this ACL grants to the given built-in group
+// (e.g. ALL_USERS / AUTHENTICATED_USERS), intersected with perm_mask.
+// dpp: log-prefix provider so the ldpp_dout lines are attributed to the
+// calling request context (replaces the old cct-based ldout).
+// Returns 0 when the ACL has no entry for the group.
-uint32_t RGWAccessControlList::get_group_perm(ACLGroupTypeEnum group,
+uint32_t RGWAccessControlList::get_group_perm(const DoutPrefixProvider *dpp,
+                                              ACLGroupTypeEnum group,
const uint32_t perm_mask) const
{
-  ldout(cct, 5) << "Searching permissions for group=" << (int)group
+  ldpp_dout(dpp, 5) << "Searching permissions for group=" << (int)group
<< " mask=" << perm_mask << dendl;
+  // acl_group_map is keyed by the numeric group id; a hit yields the raw
+  // grant bits, which are masked before being returned to the caller.
const auto iter = acl_group_map.find((uint32_t)group);
if (iter != acl_group_map.end()) {
-    ldout(cct, 5) << "Found permission: " << iter->second << dendl;
+    ldpp_dout(dpp, 5) << "Found permission: " << iter->second << dendl;
return iter->second & perm_mask;
}
-  ldout(cct, 5) << "Permissions for group not found" << dendl;
+  ldpp_dout(dpp, 5) << "Permissions for group not found" << dendl;
return 0;
}
/* should we continue looking up? */
if (!ignore_public_acls && ((perm & perm_mask) != perm_mask)) {
- perm |= acl.get_group_perm(ACL_GROUP_ALL_USERS, perm_mask);
+ perm |= acl.get_group_perm(dpp, ACL_GROUP_ALL_USERS, perm_mask);
if (false == auth_identity.is_owner_of(rgw_user(RGW_USER_ANON_ID))) {
/* this is not the anonymous user */
- perm |= acl.get_group_perm(ACL_GROUP_AUTHENTICATED_USERS, perm_mask);
+ perm |= acl.get_group_perm(dpp, ACL_GROUP_AUTHENTICATED_USERS, perm_mask);
}
}
}
-bool RGWAccessControlPolicy::is_public() const
+bool RGWAccessControlPolicy::is_public(const DoutPrefixProvider *dpp) const
{
static constexpr auto public_groups = {ACL_GROUP_ALL_USERS,
ACL_GROUP_AUTHENTICATED_USERS};
return std::any_of(public_groups.begin(), public_groups.end(),
- [&](ACLGroupTypeEnum g) {
- auto p = acl.get_group_perm(g, RGW_PERM_FULL_CONTROL);
+ [&, dpp](ACLGroupTypeEnum g) {
+ auto p = acl.get_group_perm(dpp, g, RGW_PERM_FULL_CONTROL);
return (p != RGW_PERM_NONE) && (p != RGW_PERM_INVALID);
}
);
uint32_t get_perm(const DoutPrefixProvider* dpp,
const rgw::auth::Identity& auth_identity,
uint32_t perm_mask);
- uint32_t get_group_perm(ACLGroupTypeEnum group, uint32_t perm_mask) const;
+ uint32_t get_group_perm(const DoutPrefixProvider *dpp, ACLGroupTypeEnum group, uint32_t perm_mask) const;
uint32_t get_referer_perm(uint32_t current_perm,
std::string http_referer,
uint32_t perm_mask);
}
virtual bool compare_group_name(string& id, ACLGroupTypeEnum group) { return false; }
- bool is_public() const;
+ bool is_public(const DoutPrefixProvider *dpp) const;
friend bool operator==(const RGWAccessControlPolicy& lhs, const RGWAccessControlPolicy& rhs);
friend bool operator!=(const RGWAccessControlPolicy& lhs, const RGWAccessControlPolicy& rhs);
std::unique_ptr<rgw::sal::User> user = store->get_user(owner->get_id());
if (user->load_user(dpp, null_yield) < 0) {
- ldout(cct, 10) << "owner info does not exist" << dendl;
+ ldpp_dout(dpp, 10) << "owner info does not exist" << dendl;
err_msg = "Invalid id";
return -EINVAL;
}
return -EINVAL;
}
email = u.id;
- ldout(cct, 10) << "grant user email=" << email << dendl;
+ ldpp_dout(dpp, 10) << "grant user email=" << email << dendl;
if (store->get_user_by_email(dpp, email, null_yield, &user) < 0) {
- ldout(cct, 10) << "grant user email not found or other error" << dendl;
+ ldpp_dout(dpp, 10) << "grant user email not found or other error" << dendl;
err_msg = "The e-mail address you provided does not match any account on record.";
return -ERR_UNRESOLVABLE_EMAIL;
}
if (grant_user.user_id.empty()) {
user = store->get_user(uid);
if (user->load_user(dpp, null_yield) < 0) {
- ldout(cct, 10) << "grant user does not exist:" << uid << dendl;
+ ldpp_dout(dpp, 10) << "grant user does not exist:" << uid << dendl;
err_msg = "Invalid id";
return -EINVAL;
} else {
grant_ok = true;
rgw_user new_id;
new_grant.get_id(new_id);
- ldout(cct, 10) << "new grant: " << new_id << ":" << grant_user.display_name << dendl;
+ ldpp_dout(dpp, 10) << "new grant: " << new_id << ":" << grant_user.display_name << dendl;
}
break;
case ACL_TYPE_GROUP:
RGWObjectCtx obj_ctx(store);
int ret = obj->get_obj_attrs(&obj_ctx, null_yield, dpp());
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: failed to stat object, returned error: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp(), -1) << "ERROR: failed to stat object, returned error: " << cpp_strerror(-ret) << dendl;
return ret;
}
string status = (needs_fixing ? "needs_fixing" : "ok");
if ((needs_fixing || remove_bad) && fix) {
- ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->fix_head_obj_locator(obj->get_bucket()->get_info(), needs_fixing, remove_bad, obj->get_key());
+ ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->fix_head_obj_locator(dpp(), obj->get_bucket()->get_info(), needs_fixing, remove_bad, obj->get_key());
if (ret < 0) {
cerr << "ERROR: fix_head_object_locator() returned ret=" << ret << std::endl;
goto done;
ceph::bufferlist response;
rgw_user user;
- int ret = conn->forward(user, info, nullptr, MAX_REST_RESPONSE, &in_data, &response, null_yield);
+ int ret = conn->forward(dpp(), user, info, nullptr, MAX_REST_RESPONSE, &in_data, &response, null_yield);
int parse_ret = parser.parse(response.c_str(), response.length());
if (parse_ret < 0) {
RGWRESTSimpleRequest req(g_ceph_context, info.method, url, NULL, ¶ms, opt_region);
bufferlist response;
- int ret = req.forward_request(key, info, MAX_REST_RESPONSE, &in_data, &response, null_yield);
+ int ret = req.forward_request(dpp(), key, info, MAX_REST_RESPONSE, &in_data, &response, null_yield);
int parse_ret = parser.parse(response.c_str(), response.length());
if (parse_ret < 0) {
if (store->get_zone()->get_id() == master_zone) {
// read the current period
RGWPeriod current_period;
- int ret = current_period.init(g_ceph_context,
+ int ret = current_period.init(dpp(), g_ceph_context,
static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm.get_id(),
null_yield);
if (ret < 0) {
}
// the master zone gave us back the period that it committed, so it's
// safe to save it as our latest epoch
- ret = period.store_info(false, null_yield);
+ ret = period.store_info(dpp(), false, null_yield);
if (ret < 0) {
cerr << "Error storing committed period " << period.get_id() << ": "
<< cpp_strerror(ret) << std::endl;
return ret;
}
- ret = period.set_latest_epoch(null_yield, period.get_epoch());
+ ret = period.set_latest_epoch(dpp(), null_yield, period.get_epoch());
if (ret < 0) {
cerr << "Error updating period epoch: " << cpp_strerror(ret) << std::endl;
return ret;
}
- ret = period.reflect(null_yield);
+ ret = period.reflect(dpp(), null_yield);
if (ret < 0) {
cerr << "Error updating local objects: " << cpp_strerror(ret) << std::endl;
return ret;
}
- realm.notify_new_period(period, null_yield);
+ realm.notify_new_period(dpp(), period, null_yield);
return ret;
}
Formatter *formatter, bool force)
{
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = realm.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0 ) {
cerr << "Error initializing realm " << cpp_strerror(-ret) << std::endl;
return ret;
epoch = atoi(period_epoch.c_str());
}
RGWPeriod period(period_id, epoch);
- ret = period.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm.get_id(), null_yield);
+ ret = period.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm.get_id(), null_yield);
if (ret < 0) {
cerr << "period init failed: " << cpp_strerror(-ret) << std::endl;
return ret;
}
period.fork();
- ret = period.update(null_yield);
+ ret = period.update(dpp(), null_yield);
if(ret < 0) {
// Dropping the error message here, as both the ret codes were handled in
// period.update()
return ret;
}
- ret = period.store_info(false, null_yield);
+ ret = period.store_info(dpp(), false, null_yield);
if (ret < 0) {
cerr << "failed to store period: " << cpp_strerror(-ret) << std::endl;
return ret;
cerr << "request failed: " << cpp_strerror(-ret) << std::endl;
return ret;
}
- ret = period->init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield, false);
+ ret = period->init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield, false);
if (ret < 0) {
cerr << "faile to init period " << cpp_strerror(-ret) << std::endl;
return ret;
cout << "failed to decode JSON input: " << e.what() << std::endl;
return -EINVAL;
}
- ret = period->store_info(false, null_yield);
+ ret = period->store_info(dpp(), false, null_yield);
if (ret < 0) {
cerr << "Error storing period " << period->get_id() << ": " << cpp_strerror(ret) << std::endl;
}
// store latest epoch (ignore errors)
- period->update_latest_epoch(period->get_epoch(), null_yield);
+ period->update_latest_epoch(dpp(), period->get_epoch(), null_yield);
return 0;
}
std::string* period_id)
{
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, store->svc()->sysobj, null_yield);
+ int ret = realm.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield);
if (ret < 0) {
std::cerr << "failed to read realm: " << cpp_strerror(-ret) << std::endl;
return ret;
{
RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), static_cast<rgw::sal::RadosStore*>(store)->svc()->rados->get_async_processor());
- int ret = sync.init();
+ int ret = sync.init(dpp());
if (ret < 0) {
status.push_back(string("failed to retrieve sync info: sync.init() failed: ") + cpp_strerror(-ret));
return;
}
rgw_meta_sync_status sync_status;
- ret = sync.read_sync_status(&sync_status);
+ ret = sync.read_sync_status(dpp(), &sync_status);
if (ret < 0) {
status.push_back(string("failed to read sync status: ") + cpp_strerror(-ret));
return;
map<int, RGWMetadataLogInfo> master_shards_info;
string master_period = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->get_current_period_id();
- ret = sync.read_master_log_shards_info(master_period, &master_shards_info);
+ ret = sync.read_master_log_shards_info(dpp(), master_period, &master_shards_info);
if (ret < 0) {
status.push_back(string("failed to fetch master sync status: ") + cpp_strerror(-ret));
return;
push_ss(ss, status) << "behind shards: " << "[" << shards_behind_set << "]";
map<int, rgw_mdlog_shard_data> master_pos;
- ret = sync.read_master_log_shards_next(sync_status.sync_info.period, shards_behind, &master_pos);
+ ret = sync.read_master_log_shards_next(dpp(), sync_status.sync_info.period, shards_behind, &master_pos);
if (ret < 0) {
derr << "ERROR: failed to fetch master next positions (" << cpp_strerror(-ret) << ")" << dendl;
} else {
}
RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), static_cast<rgw::sal::RadosStore*>(store)->svc()->rados->get_async_processor(), source_zone, nullptr);
- int ret = sync.init();
+ int ret = sync.init(dpp());
if (ret < 0) {
push_ss(ss, status, tab) << string("failed to retrieve sync info: ") + cpp_strerror(-ret);
flush_ss(ss, status);
}
rgw_data_sync_status sync_status;
- ret = sync.read_sync_status(&sync_status);
+ ret = sync.read_sync_status(dpp(), &sync_status);
if (ret < 0 && ret != -ENOENT) {
push_ss(ss, status, tab) << string("failed read sync status: ") + cpp_strerror(-ret);
return;
}
set<int> recovering_shards;
- ret = sync.read_recovering_shards(sync_status.sync_info.num_shards, recovering_shards);
+ ret = sync.read_recovering_shards(dpp(), sync_status.sync_info.num_shards, recovering_shards);
if (ret < 0 && ret != ENOENT) {
push_ss(ss, status, tab) << string("failed read recovering shards: ") + cpp_strerror(-ret);
return;
map<int, RGWDataChangesLogInfo> source_shards_info;
- ret = sync.read_source_log_shards_info(&source_shards_info);
+ ret = sync.read_source_log_shards_info(dpp(), &source_shards_info);
if (ret < 0) {
push_ss(ss, status, tab) << string("failed to fetch source sync status: ") + cpp_strerror(-ret);
return;
push_ss(ss, status, tab) << "behind shards: " << "[" << shards_behind_set << "]" ;
map<int, rgw_datalog_shard_data> master_pos;
- ret = sync.read_source_log_shards_next(shards_behind, &master_pos);
+ ret = sync.read_source_log_shards_next(dpp(), shards_behind, &master_pos);
if (ret < 0) {
derr << "ERROR: failed to fetch next positions (" << cpp_strerror(-ret) << ")" << dendl;
} else {
return out << std::setw(h.w) << h.header << std::setw(1) << ' ';
}
-static int bucket_source_sync_status(rgw::sal::RadosStore* store, const RGWZone& zone,
+static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, const RGWZone& zone,
const RGWZone& source, RGWRESTConn *conn,
const RGWBucketInfo& bucket_info,
rgw_sync_bucket_pipe pipe,
}
if (!pipe.source.bucket) {
- lderr(store->ctx()) << __func__ << "(): missing source bucket" << dendl;
+ ldpp_dout(dpp, -1) << __func__ << "(): missing source bucket" << dendl;
return -EINVAL;
}
std::unique_ptr<rgw::sal::Bucket> source_bucket;
int r = init_bucket(nullptr, *pipe.source.bucket, &source_bucket);
if (r < 0) {
- lderr(store->ctx()) << "failed to read source bucket info: " << cpp_strerror(r) << dendl;
+ ldpp_dout(dpp, -1) << "failed to read source bucket info: " << cpp_strerror(r) << dendl;
return r;
}
pipe.dest.bucket = bucket_info.bucket;
std::vector<rgw_bucket_shard_sync_info> status;
- r = rgw_bucket_sync_status(dpp(), store, pipe, bucket_info, &source_bucket->get_info(), &status);
+ r = rgw_bucket_sync_status(dpp, store, pipe, bucket_info, &source_bucket->get_info(), &status);
if (r < 0) {
- lderr(store->ctx()) << "failed to read bucket sync status: " << cpp_strerror(r) << dendl;
+ ldpp_dout(dpp, -1) << "failed to read bucket sync status: " << cpp_strerror(r) << dendl;
return r;
}
out << indented{width} << "incremental sync: " << num_inc << "/" << total_shards << " shards\n";
BucketIndexShardsManager remote_markers;
- r = rgw_read_remote_bilog_info(conn, source_bucket->get_key(), remote_markers, null_yield);
+ r = rgw_read_remote_bilog_info(dpp, conn, source_bucket->get_key(), remote_markers, null_yield);
if (r < 0) {
- lderr(store->ctx()) << "failed to read remote log: " << cpp_strerror(r) << dendl;
+ ldpp_dout(dpp, -1) << "failed to read remote log: " << cpp_strerror(r) << dendl;
return r;
}
bucket_handler.reset(handler->alloc_child(*eff_bucket, nullopt));
}
- ret = bucket_handler->init(null_yield);
+ ret = bucket_handler->init(dpp(), null_yield);
if (ret < 0) {
cerr << "ERROR: failed to init bucket sync policy handler: " << cpp_strerror(-ret) << " (ret=" << ret << ")" << std::endl;
return ret;
int r = store->get_sync_policy_handler(dpp(), std::nullopt, info.bucket, &handler, null_yield);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed to get policy handler for bucket (" << info.bucket << "): r=" << r << ": " << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp(), -1) << "ERROR: failed to get policy handler for bucket (" << info.bucket << "): r=" << r << ": " << cpp_strerror(-r) << dendl;
return r;
}
int r = store->get_sync_policy_handler(dpp(), std::nullopt, info.bucket, &handler, null_yield);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed to get policy handler for bucket (" << info.bucket << "): r=" << r << ": " << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp(), -1) << "ERROR: failed to get policy handler for bucket (" << info.bucket << "): r=" << r << ": " << cpp_strerror(-r) << dendl;
return r;
}
if (!source_zone_id.empty()) {
auto z = zonegroup.zones.find(source_zone_id);
if (z == zonegroup.zones.end()) {
- lderr(store->ctx()) << "Source zone not found in zonegroup "
+ ldpp_dout(dpp(), -1) << "Source zone not found in zonegroup "
<< zonegroup.get_name() << dendl;
return -EINVAL;
}
auto c = zone_conn_map.find(source_zone_id);
if (c == zone_conn_map.end()) {
- lderr(store->ctx()) << "No connection to zone " << z->second.name << dendl;
+ ldpp_dout(dpp(), -1) << "No connection to zone " << z->second.name << dendl;
return -EINVAL;
}
zone_ids.insert(source_zone_id);
continue;
}
if (pipe.source.zone.value_or(rgw_zone_id()) == z->second.id) {
- bucket_source_sync_status(store, zone, z->second,
+ bucket_source_sync_status(dpp(), store, zone, z->second,
c->second,
info, pipe,
width, out);
shard_id);
// call cls_log_trim() until it returns -ENODATA
for (;;) {
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->timelog.trim(oid, {}, {}, {}, marker, nullptr,
+ int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->timelog.trim(dpp(), oid, {}, {}, {}, marker, nullptr,
null_yield);
if (ret == -ENODATA) {
return 0;
b(_bucket) {}
int init() {
- int ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return ret;
int write_policy() {
if (!b) {
- int ret = zonegroup.update(null_yield);
+ int ret = zonegroup.update(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to update zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
return EINVAL;
}
RGWPeriod period(period_id);
- int ret = period.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = period.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "period.init failed: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = period.delete_obj(null_yield);
+ ret = period.delete_obj(dpp(), null_yield);
if (ret < 0) {
cerr << "ERROR: couldn't delete period: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
if (staging) {
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = realm.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0 ) {
cerr << "Error initializing realm " << cpp_strerror(-ret) << std::endl;
return -ret;
epoch = 1;
}
RGWPeriod period(period_id, epoch);
- int ret = period.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm_id,
+ int ret = period.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm_id,
null_yield, realm_name);
if (ret < 0) {
cerr << "period init failed: " << cpp_strerror(-ret) << std::endl;
case OPT::PERIOD_LIST:
{
list<string> periods;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->list_periods(periods);
+ int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->list_periods(dpp(), periods);
if (ret < 0) {
cerr << "failed to list periods: " << cpp_strerror(-ret) << std::endl;
return -ret;
if (url.empty()) {
// load current period for endpoints
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = realm.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
RGWPeriod current_period(realm.get_current_period());
- ret = current_period.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ ret = current_period.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "failed to init current period: " << cpp_strerror(-ret) << std::endl;
return -ret;
RGWRealm realm(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj);
if (!realm_name.empty()) {
// look up realm_id for the given realm_name
- int ret = realm.read_id(realm_name, realm_id, null_yield);
+ int ret = realm.read_id(dpp(), realm_name, realm_id, null_yield);
if (ret < 0) {
cerr << "ERROR: failed to read realm for " << realm_name
<< ": " << cpp_strerror(-ret) << std::endl;
}
} else {
// use default realm_id when none is given
- int ret = realm.read_default_id(realm_id, null_yield);
+ int ret = realm.read_default_id(dpp(), realm_id, null_yield);
if (ret < 0 && ret != -ENOENT) { // on ENOENT, use empty realm_id
cerr << "ERROR: failed to read default realm: "
<< cpp_strerror(-ret) << std::endl;
}
RGWPeriodConfig period_config;
- int ret = period_config.read(static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm_id, null_yield);
+ int ret = period_config.read(dpp(), static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm_id, null_yield);
if (ret < 0 && ret != -ENOENT) {
cerr << "ERROR: failed to read period config: "
<< cpp_strerror(-ret) << std::endl;
if (opt_cmd != OPT::GLOBAL_QUOTA_GET) {
// write the modified period config
- ret = period_config.write(static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm_id, null_yield);
+ ret = period_config.write(dpp(), static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm_id, null_yield);
if (ret < 0) {
cerr << "ERROR: failed to write period config: "
<< cpp_strerror(-ret) << std::endl;
}
if (set_default) {
- ret = realm.set_as_default(null_yield);
+ ret = realm.set_as_default(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to set realm " << realm_name << " as default: " << cpp_strerror(-ret) << std::endl;
}
cerr << "missing realm name or id" << std::endl;
return EINVAL;
}
- int ret = realm.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = realm.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "realm.init failed: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = realm.delete_obj(null_yield);
+ ret = realm.delete_obj(dpp(), null_yield);
if (ret < 0) {
cerr << "ERROR: couldn't : " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT::REALM_GET:
{
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = realm.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
if (ret == -ENOENT && realm_name.empty() && realm_id.empty()) {
cerr << "missing realm name or id, or default realm not found" << std::endl;
{
RGWRealm realm(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj);
string default_id;
- int ret = realm.read_default_id(default_id, null_yield);
+ int ret = realm.read_default_id(dpp(), default_id, null_yield);
if (ret == -ENOENT) {
cout << "No default realm is set" << std::endl;
return -ret;
{
RGWRealm realm(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj);
string default_id;
- int ret = realm.read_default_id(default_id, null_yield);
+ int ret = realm.read_default_id(dpp(), default_id, null_yield);
if (ret < 0 && ret != -ENOENT) {
cerr << "could not determine default realm: " << cpp_strerror(-ret) << std::endl;
}
list<string> realms;
- ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->list_realms(realms);
+ ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->list_realms(dpp(), realms);
if (ret < 0) {
cerr << "failed to list realms: " << cpp_strerror(-ret) << std::endl;
return -ret;
return -ret;
}
list<string> periods;
- ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->list_periods(period_id, periods, null_yield);
+ ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->list_periods(dpp(), period_id, periods, null_yield);
if (ret < 0) {
cerr << "list periods failed: " << cpp_strerror(-ret) << std::endl;
return -ret;
cerr << "missing realm name or id" << std::endl;
return EINVAL;
}
- int ret = realm.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = realm.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "realm.init failed: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = realm.rename(realm_new_name, null_yield);
+ ret = realm.rename(dpp(), realm_new_name, null_yield);
if (ret < 0) {
cerr << "realm.rename failed: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
RGWRealm realm(realm_id, realm_name);
bool new_realm = false;
- int ret = realm.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = realm.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0 && ret != -ENOENT) {
cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl;
return -ret;
return 1;
}
} else {
- ret = realm.update(null_yield);
+ ret = realm.update(dpp(), null_yield);
if (ret < 0) {
cerr << "ERROR: couldn't store realm info: " << cpp_strerror(-ret) << std::endl;
return 1;
}
if (set_default) {
- ret = realm.set_as_default(null_yield);
+ ret = realm.set_as_default(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to set realm " << realm_name << " as default: " << cpp_strerror(-ret) << std::endl;
}
case OPT::REALM_DEFAULT:
{
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = realm.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = realm.set_as_default(null_yield);
+ ret = realm.set_as_default(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to set realm as default: " << cpp_strerror(-ret) << std::endl;
return -ret;
return -ret;
}
RGWRealm realm;
- realm.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield, false);
+ realm.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield, false);
try {
decode_json_obj(realm, &p);
} catch (const JSONDecoder::err& e) {
<< cpp_strerror(ret) << std::endl;
return -ret;
} else if (ret ==-EEXIST) {
- ret = realm.update(null_yield);
+ ret = realm.update(dpp(), null_yield);
if (ret < 0) {
cerr << "Error storing realm " << realm.get_id() << ": "
<< cpp_strerror(ret) << std::endl;
}
if (set_default) {
- ret = realm.set_as_default(null_yield);
+ ret = realm.set_as_default(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to set realm " << realm_name << " as default: " << cpp_strerror(-ret) << std::endl;
}
}
RGWZoneGroup zonegroup(zonegroup_id,zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "failed to initialize zonegroup " << zonegroup_name << " id " << zonegroup_id << " :"
<< cpp_strerror(-ret) << std::endl;
return -ret;
}
RGWZoneParams zone(zone_id, zone_name);
- ret = zone.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ ret = zone.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
if (zone.realm_id != zonegroup.realm_id) {
zone.realm_id = zonegroup.realm_id;
- ret = zone.update(null_yield);
+ ret = zone.update(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to save zone info: " << cpp_strerror(-ret) << std::endl;
return -ret;
bool *psync_from_all = (sync_from_all_specified ? &sync_from_all : nullptr);
string *predirect_zone = (redirect_zone_set ? &redirect_zone : nullptr);
- ret = zonegroup.add_zone(zone,
+ ret = zonegroup.add_zone(dpp(), zone,
(is_master_set ? &is_master : NULL),
(is_read_only_set ? &read_only : NULL),
endpoints, ptier_type,
return EINVAL;
}
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = realm.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
if (set_default) {
- ret = zonegroup.set_as_default(null_yield);
+ ret = zonegroup.set_as_default(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to set zonegroup " << zonegroup_name << " as default: " << cpp_strerror(-ret) << std::endl;
}
}
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = zonegroup.set_as_default(null_yield);
+ ret = zonegroup.set_as_default(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to set zonegroup as default: " << cpp_strerror(-ret) << std::endl;
return -ret;
return EINVAL;
}
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj,
+ int ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj,
null_yield);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = zonegroup.delete_obj(null_yield);
+ ret = zonegroup.delete_obj(dpp(), null_yield);
if (ret < 0) {
cerr << "ERROR: couldn't delete zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT::ZONEGROUP_GET:
{
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT::ZONEGROUP_LIST:
{
RGWZoneGroup zonegroup;
- int ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj,
+ int ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj,
null_yield, false);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
}
list<string> zonegroups;
- ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->list_zonegroups(zonegroups);
+ ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->list_zonegroups(dpp(), zonegroups);
if (ret < 0) {
cerr << "failed to list zonegroups: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
string default_zonegroup;
- ret = zonegroup.read_default_id(default_zonegroup, null_yield);
+ ret = zonegroup.read_default_id(dpp(), default_zonegroup, null_yield);
if (ret < 0 && ret != -ENOENT) {
cerr << "could not determine default zonegroup: " << cpp_strerror(-ret) << std::endl;
}
case OPT::ZONEGROUP_MODIFY:
{
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
if (is_master_set) {
- zonegroup.update_master(is_master, null_yield);
+ zonegroup.update_master(dpp(), is_master, null_yield);
need_update = true;
}
} else if (!realm_name.empty()) {
// get realm id from name
RGWRealm realm{g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj};
- ret = realm.read_id(realm_name, zonegroup.realm_id, null_yield);
+ ret = realm.read_id(dpp(), realm_name, zonegroup.realm_id, null_yield);
if (ret < 0) {
cerr << "failed to find realm by name " << realm_name << std::endl;
return -ret;
}
if (need_update) {
- ret = zonegroup.update(null_yield);
+ ret = zonegroup.update(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to update zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
if (set_default) {
- ret = zonegroup.set_as_default(null_yield);
+ ret = zonegroup.set_as_default(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to set zonegroup " << zonegroup_name << " as default: " << cpp_strerror(-ret) << std::endl;
}
case OPT::ZONEGROUP_SET:
{
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = realm.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
bool default_realm_not_exist = (ret == -ENOENT && realm_id.empty() && realm_name.empty());
if (ret < 0 && !default_realm_not_exist ) {
}
RGWZoneGroup zonegroup;
- ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj,
+ ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj,
null_yield, false);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
cerr << "ERROR: couldn't create zonegroup info: " << cpp_strerror(-ret) << std::endl;
return 1;
} else if (ret == -EEXIST) {
- ret = zonegroup.update(null_yield);
+ ret = zonegroup.update(dpp(), null_yield);
if (ret < 0) {
cerr << "ERROR: couldn't store zonegroup info: " << cpp_strerror(-ret) << std::endl;
return 1;
}
if (set_default) {
- ret = zonegroup.set_as_default(null_yield);
+ ret = zonegroup.set_as_default(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to set zonegroup " << zonegroup_name << " as default: " << cpp_strerror(-ret) << std::endl;
}
case OPT::ZONEGROUP_REMOVE:
{
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
}
- ret = zonegroup.remove_zone(zone_id, null_yield);
+ ret = zonegroup.remove_zone(dpp(), zone_id, null_yield);
if (ret < 0) {
cerr << "failed to remove zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
return EINVAL;
}
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = zonegroup.rename(zonegroup_new_name, null_yield);
+ ret = zonegroup.rename(dpp(), zonegroup_new_name, null_yield);
if (ret < 0) {
cerr << "failed to rename zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT::ZONEGROUP_PLACEMENT_LIST:
{
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj,
+ int ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj,
null_yield);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
}
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
zonegroup.default_placement = rule;
}
- zonegroup.post_process_params(null_yield);
- ret = zonegroup.update(null_yield);
+ zonegroup.post_process_params(dpp(), null_yield);
+ ret = zonegroup.update(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to update zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
/* if the user didn't provide zonegroup info , create stand alone zone */
if (!zonegroup_id.empty() || !zonegroup_name.empty()) {
- ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "unable to initialize zonegroup " << zonegroup_name << ": " << cpp_strerror(-ret) << std::endl;
return -ret;
}
RGWZoneParams zone(zone_id, zone_name);
- ret = zone.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield, false);
+ ret = zone.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield, false);
if (ret < 0) {
cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
string *ptier_type = (tier_type_specified ? &tier_type : nullptr);
bool *psync_from_all = (sync_from_all_specified ? &sync_from_all : nullptr);
string *predirect_zone = (redirect_zone_set ? &redirect_zone : nullptr);
- ret = zonegroup.add_zone(zone,
+ ret = zonegroup.add_zone(dpp(), zone,
(is_master_set ? &is_master : NULL),
(is_read_only_set ? &read_only : NULL),
endpoints,
}
if (set_default) {
- ret = zone.set_as_default(null_yield);
+ ret = zone.set_as_default(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to set zone " << zone_name << " as default: " << cpp_strerror(-ret) << std::endl;
}
case OPT::ZONE_DEFAULT:
{
RGWZoneGroup zonegroup(zonegroup_id,zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "WARNING: failed to initialize zonegroup " << zonegroup_name << std::endl;
}
return EINVAL;
}
RGWZoneParams zone(zone_id, zone_name);
- ret = zone.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ ret = zone.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = zone.set_as_default(null_yield);
+ ret = zone.set_as_default(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to set zone as default: " << cpp_strerror(-ret) << std::endl;
return -ret;
return EINVAL;
}
RGWZoneParams zone(zone_id, zone_name);
- int ret = zone.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = zone.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
list<string> zonegroups;
- ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->list_zonegroups(zonegroups);
+ ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->list_zonegroups(dpp(), zonegroups);
if (ret < 0) {
cerr << "failed to list zonegroups: " << cpp_strerror(-ret) << std::endl;
return -ret;
for (list<string>::iterator iter = zonegroups.begin(); iter != zonegroups.end(); ++iter) {
RGWZoneGroup zonegroup(string(), *iter);
- int ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "WARNING: failed to initialize zonegroup " << zonegroup_name << std::endl;
continue;
}
- ret = zonegroup.remove_zone(zone.get_id(), null_yield);
+ ret = zonegroup.remove_zone(dpp(), zone.get_id(), null_yield);
if (ret < 0 && ret != -ENOENT) {
cerr << "failed to remove zone " << zone_name << " from zonegroup " << zonegroup.get_name() << ": "
<< cpp_strerror(-ret) << std::endl;
}
}
- ret = zone.delete_obj(null_yield);
+ ret = zone.delete_obj(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to delete zone " << zone_name << ": " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT::ZONE_GET:
{
RGWZoneParams zone(zone_id, zone_name);
- int ret = zone.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = zone.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT::ZONE_SET:
{
RGWZoneParams zone(zone_name);
- int ret = zone.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield,
+ int ret = zone.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield,
false);
if (ret < 0) {
return -ret;
}
- ret = zone.read(null_yield);
+ ret = zone.read(dpp(), null_yield);
if (ret < 0 && ret != -ENOENT) {
cerr << "zone.read() returned ret=" << ret << std::endl;
return -ret;
if(zone.realm_id.empty()) {
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = realm.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0 && ret != -ENOENT) {
cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
cerr << "zone id " << zone.get_id();
- ret = zone.fix_pool_names(null_yield);
+ ret = zone.fix_pool_names(dpp(), null_yield);
if (ret < 0) {
cerr << "ERROR: couldn't fix zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = zone.write(false, null_yield);
+ ret = zone.write(dpp(), false, null_yield);
if (ret < 0) {
cerr << "ERROR: couldn't create zone: " << cpp_strerror(-ret) << std::endl;
return 1;
}
if (set_default) {
- ret = zone.set_as_default(null_yield);
+ ret = zone.set_as_default(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to set zone " << zone_name << " as default: " << cpp_strerror(-ret) << std::endl;
}
case OPT::ZONE_LIST:
{
list<string> zones;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->list_zones(zones);
+ int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->list_zones(dpp(), zones);
if (ret < 0) {
cerr << "failed to list zones: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
RGWZoneParams zone;
- ret = zone.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield, false);
+ ret = zone.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield, false);
if (ret < 0) {
cerr << "failed to init zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
string default_zone;
- ret = zone.read_default_id(default_zone, null_yield);
+ ret = zone.read_default_id(dpp(), default_zone, null_yield);
if (ret < 0 && ret != -ENOENT) {
cerr << "could not determine default zone: " << cpp_strerror(-ret) << std::endl;
}
case OPT::ZONE_MODIFY:
{
RGWZoneParams zone(zone_id, zone_name);
- int ret = zone.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = zone.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "failed to init zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
} else if (!realm_name.empty()) {
// get realm id from name
RGWRealm realm{g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj};
- ret = realm.read_id(realm_name, zone.realm_id, null_yield);
+ ret = realm.read_id(dpp(), realm_name, zone.realm_id, null_yield);
if (ret < 0) {
cerr << "failed to find realm by name " << realm_name << std::endl;
return -ret;
}
if (need_zone_update) {
- ret = zone.update(null_yield);
+ ret = zone.update(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to save zone info: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
bool *psync_from_all = (sync_from_all_specified ? &sync_from_all : nullptr);
string *predirect_zone = (redirect_zone_set ? &redirect_zone : nullptr);
- ret = zonegroup.add_zone(zone,
+ ret = zonegroup.add_zone(dpp(), zone,
(is_master_set ? &is_master : NULL),
(is_read_only_set ? &read_only : NULL),
endpoints, ptier_type,
return -ret;
}
- ret = zonegroup.update(null_yield);
+ ret = zonegroup.update(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to update zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
if (set_default) {
- ret = zone.set_as_default(null_yield);
+ ret = zone.set_as_default(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to set zone " << zone_name << " as default: " << cpp_strerror(-ret) << std::endl;
}
return EINVAL;
}
RGWZoneParams zone(zone_id,zone_name);
- int ret = zone.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = zone.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = zone.rename(zone_new_name, null_yield);
+ ret = zone.rename(dpp(), zone_new_name, null_yield);
if (ret < 0) {
cerr << "failed to rename zone " << zone_name << " to " << zone_new_name << ": " << cpp_strerror(-ret)
<< std::endl;
return -ret;
}
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "WARNING: failed to initialize zonegroup " << zonegroup_name << std::endl;
} else {
- ret = zonegroup.rename_zone(zone, null_yield);
+ ret = zonegroup.rename_zone(dpp(), zone, null_yield);
if (ret < 0) {
cerr << "Error in zonegroup rename for " << zone_name << ": " << cpp_strerror(-ret) << std::endl;
return -ret;
}
RGWZoneParams zone(zone_id, zone_name);
- int ret = zone.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = zone.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "failed to init zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
if (opt_cmd == OPT::ZONE_PLACEMENT_ADD ||
opt_cmd == OPT::ZONE_PLACEMENT_MODIFY) {
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- ret = zonegroup.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ ret = zonegroup.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
}
- ret = zone.update(null_yield);
+ ret = zone.update(dpp(), null_yield);
if (ret < 0) {
cerr << "failed to save zone info: " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT::ZONE_PLACEMENT_LIST:
{
RGWZoneParams zone(zone_id, zone_name);
- int ret = zone.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = zone.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
RGWZoneParams zone(zone_id, zone_name);
- int ret = zone.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = zone.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
// load the period
RGWPeriod period(period_id);
- int ret = period.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = period.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "period init failed: " << cpp_strerror(-ret) << std::endl;
return -ret;
{
// read realm and staging period
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
+ int ret = realm.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, null_yield);
if (ret < 0) {
cerr << "Error initializing realm: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
RGWPeriod period(RGWPeriod::get_staging_id(realm.get_id()), 1);
- ret = period.init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm.get_id(), null_yield);
+ ret = period.init(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm.get_id(), null_yield);
if (ret < 0) {
cerr << "period init failed: " << cpp_strerror(-ret) << std::endl;
return -ret;
} else {
/* list users in groups of max-keys, then perform user-bucket
* limit-check on each group */
- ret = store->meta_list_keys_init(metadata_key, string(), &handle);
+ ret = store->meta_list_keys_init(dpp(), metadata_key, string(), &handle);
if (ret < 0) {
cerr << "ERROR: buckets limit check can't get user metadata_key: "
<< cpp_strerror(-ret) << std::endl;
if (opt_cmd == OPT::BUCKET_STATS) {
if (bucket_name.empty() && !bucket_id.empty()) {
rgw_bucket bucket;
- if (!rgw_find_bucket_by_id(store->ctx(), store, marker, bucket_id, &bucket)) {
+ if (!rgw_find_bucket_by_id(dpp(), store->ctx(), store, marker, bucket_id, &bucket)) {
cerr << "failure: no such bucket id" << std::endl;
return -ENOENT;
}
formatter->reset();
formatter->open_array_section("logs");
RGWAccessHandle h;
- int r = static_cast<rgw::sal::RadosStore*>(store)->getRados()->log_list_init(date, &h);
+ int r = static_cast<rgw::sal::RadosStore*>(store)->getRados()->log_list_init(dpp(), date, &h);
if (r == -ENOENT) {
// no logs.
} else {
if (opt_cmd == OPT::LOG_SHOW) {
RGWAccessHandle h;
- int r = static_cast<rgw::sal::RadosStore*>(store)->getRados()->log_show_init(oid, &h);
+ int r = static_cast<rgw::sal::RadosStore*>(store)->getRados()->log_show_init(dpp(), oid, &h);
if (r < 0) {
cerr << "error opening log " << oid << ": " << cpp_strerror(-r) << std::endl;
return -r;
cout << std::endl;
}
if (opt_cmd == OPT::LOG_RM) {
- int r = static_cast<rgw::sal::RadosStore*>(store)->getRados()->log_remove(oid);
+ int r = static_cast<rgw::sal::RadosStore*>(store)->getRados()->log_remove(dpp(), oid);
if (r < 0) {
cerr << "error removing log " << oid << ": " << cpp_strerror(-r) << std::endl;
return -r;
exit(1);
}
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->add_bucket_placement(pool, null_yield);
+ int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->add_bucket_placement(dpp(), pool, null_yield);
if (ret < 0)
cerr << "failed to add bucket placement: " << cpp_strerror(-ret) << std::endl;
}
exit(1);
}
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->remove_bucket_placement(pool, null_yield);
+ int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->remove_bucket_placement(dpp(), pool, null_yield);
if (ret < 0)
cerr << "failed to remove bucket placement: " << cpp_strerror(-ret) << std::endl;
}
if (opt_cmd == OPT::POOLS_LIST) {
set<rgw_pool> pools;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->list_placement_set(pools, null_yield);
+ int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->list_placement_set(dpp(), pools, null_yield);
if (ret < 0) {
cerr << "could not list placement set: " << cpp_strerror(-ret) << std::endl;
return -ret;
return 1;
}
- ret = RGWUsage::clear(store);
+ ret = RGWUsage::clear(dpp(), store);
if (ret < 0) {
return ret;
}
}
RGWOLHInfo olh;
rgw_obj obj(bucket->get_key(), object);
- ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_olh(bucket->get_info(), obj, &olh);
+ ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_olh(dpp(), bucket->get_info(), obj, &olh);
if (ret < 0) {
cerr << "ERROR: failed reading olh: " << cpp_strerror(-ret) << std::endl;
return -ret;
rgw_cls_bi_entry entry;
- ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->bi_get(bucket->get_info(), obj, bi_index_type, &entry);
+ ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->bi_get(dpp(), bucket->get_info(), obj, bi_index_type, &entry);
if (ret < 0) {
cerr << "ERROR: bi_get(): " << cpp_strerror(-ret) << std::endl;
return -ret;
entry.old_num_shards = num_source_shards;
entry.new_num_shards = num_shards;
- return reshard.add(entry);
+ return reshard.add(dpp(), entry);
}
if (opt_cmd == OPT::RESHARD_LIST) {
RGWBucketReshard br(static_cast<rgw::sal::RadosStore*>(store), bucket->get_info(), bucket->get_attrs(), nullptr /* no callback */);
list<cls_rgw_bucket_instance_entry> status;
- int r = br.get_status(&status);
+ int r = br.get_status(dpp(), &status);
if (r < 0) {
cerr << "ERROR: could not get resharding status for bucket " <<
bucket_name << std::endl;
// we did not encounter an error, so let's work with the bucket
RGWBucketReshard br(static_cast<rgw::sal::RadosStore*>(store), bucket->get_info(), bucket->get_attrs(),
nullptr /* no callback */);
- int ret = br.cancel();
+ int ret = br.cancel(dpp());
if (ret < 0) {
if (ret == -EBUSY) {
cerr << "There is ongoing resharding, please retry after " <<
entry.bucket_name = bucket_name;
//entry.bucket_id = bucket_id;
- ret = reshard.remove(entry);
+ ret = reshard.remove(dpp(), entry);
if (ret < 0 && ret != -ENOENT) {
cerr << "Error in updating reshard log with bucket " <<
bucket_name << ": " << cpp_strerror(-ret) << std::endl;
rgw_obj_index_key index_key;
key.get_index_key(&index_key);
oid_list.push_back(index_key);
- ret = bucket->remove_objs_from_index(oid_list);
+ ret = bucket->remove_objs_from_index(dpp(), oid_list);
if (ret < 0) {
cerr << "ERROR: remove_obj_from_index() returned error: " << cpp_strerror(-ret) << std::endl;
return 1;
info.job_name = job_id;
info.num_shards = num_shards;
- int ret = search.init(job_id, &info, detail);
+ int ret = search.init(dpp(), job_id, &info, detail);
if (ret < 0) {
cerr << "could not init search, ret=" << ret << std::endl;
return -ret;
cerr << "ERROR: --job-id not specified" << std::endl;
return EINVAL;
}
- int ret = search.init(job_id, NULL);
+ int ret = search.init(dpp(), job_id, NULL);
if (ret < 0) {
if (ret == -ENOENT) {
cerr << "job not found" << std::endl;
}
RGWOrphanStore orphan_store(store);
- int ret = orphan_store.init();
+ int ret = orphan_store.init(dpp());
if (ret < 0){
cerr << "connection to cluster failed!" << std::endl;
return -ret;
"so at most one of the two should be specified" << std::endl;
return EINVAL;
}
- ret = static_cast<rgw::sal::RadosStore*>(store)->ctl()->user->reset_stats(user->get_id(), null_yield);
+ ret = static_cast<rgw::sal::RadosStore*>(store)->ctl()->user->reset_stats(dpp(), user->get_id(), null_yield);
if (ret < 0) {
cerr << "ERROR: could not reset user stats: " << cpp_strerror(-ret) <<
std::endl;
cerr << "ERROR: could not init bucket: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = bucket->sync_user_stats(null_yield);
+ ret = bucket->sync_user_stats(dpp(), null_yield);
if (ret < 0) {
cerr << "ERROR: could not sync bucket stats: " <<
cpp_strerror(-ret) << std::endl;
RGWStorageStats stats;
ceph::real_time last_stats_sync;
ceph::real_time last_stats_update;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->ctl()->user->read_stats(user->get_id(), &stats, null_yield,
+ int ret = static_cast<rgw::sal::RadosStore*>(store)->ctl()->user->read_stats(dpp(), user->get_id(), &stats, null_yield,
&last_stats_sync,
&last_stats_update);
if (ret < 0) {
}
void *handle;
int max = 1000;
- int ret = store->meta_list_keys_init(metadata_key, marker, &handle);
+ int ret = store->meta_list_keys_init(dpp(), metadata_key, marker, &handle);
if (ret < 0) {
cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl;
return -ret;
meta_log->init_list_entries(i, {}, {}, marker, &handle);
bool truncated;
do {
- int ret = meta_log->list_entries(handle, 1000, entries, NULL, &truncated);
+ int ret = meta_log->list_entries(dpp(), handle, 1000, entries, NULL, &truncated);
if (ret < 0) {
cerr << "ERROR: meta_log->list_entries(): " << cpp_strerror(-ret) << std::endl;
return -ret;
for (; i < g_ceph_context->_conf->rgw_md_log_max_shards; i++) {
RGWMetadataLogInfo info;
- meta_log->get_info(i, &info);
+ meta_log->get_info(dpp(), i, &info);
::encode_json("info", info, formatter.get());
}
auto num_shards = g_conf()->rgw_md_log_max_shards;
- ret = crs.run(create_admin_meta_log_trim_cr(dpp(), static_cast<rgw::sal::RadosStore*>(store), &http, num_shards));
+ ret = crs.run(dpp(), create_admin_meta_log_trim_cr(dpp(), static_cast<rgw::sal::RadosStore*>(store), &http, num_shards));
if (ret < 0) {
cerr << "automated mdlog trim failed with " << cpp_strerror(ret) << std::endl;
return -ret;
// trim until -ENODATA
do {
- ret = meta_log->trim(shard_id, {}, {}, {}, marker);
+ ret = meta_log->trim(dpp(), shard_id, {}, {}, {}, marker);
} while (ret == 0);
if (ret < 0 && ret != -ENODATA) {
cerr << "ERROR: meta_log->trim(): " << cpp_strerror(-ret) << std::endl;
if (opt_cmd == OPT::METADATA_SYNC_STATUS) {
RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), static_cast<rgw::sal::RadosStore*>(store)->svc()->rados->get_async_processor());
- int ret = sync.init();
+ int ret = sync.init(dpp());
if (ret < 0) {
cerr << "ERROR: sync.init() returned ret=" << ret << std::endl;
return -ret;
}
rgw_meta_sync_status sync_status;
- ret = sync.read_sync_status(&sync_status);
+ ret = sync.read_sync_status(dpp(), &sync_status);
if (ret < 0) {
cerr << "ERROR: sync.read_sync_status() returned ret=" << ret << std::endl;
return -ret;
if (opt_cmd == OPT::METADATA_SYNC_INIT) {
RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), static_cast<rgw::sal::RadosStore*>(store)->svc()->rados->get_async_processor());
- int ret = sync.init();
+ int ret = sync.init(dpp());
if (ret < 0) {
cerr << "ERROR: sync.init() returned ret=" << ret << std::endl;
return -ret;
}
- ret = sync.init_sync_status();
+ ret = sync.init_sync_status(dpp());
if (ret < 0) {
cerr << "ERROR: sync.init_sync_status() returned ret=" << ret << std::endl;
return -ret;
if (opt_cmd == OPT::METADATA_SYNC_RUN) {
RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), static_cast<rgw::sal::RadosStore*>(store)->svc()->rados->get_async_processor());
- int ret = sync.init();
+ int ret = sync.init(dpp());
if (ret < 0) {
cerr << "ERROR: sync.init() returned ret=" << ret << std::endl;
return -ret;
}
- ret = sync.run(null_yield);
+ ret = sync.run(dpp(), null_yield);
if (ret < 0) {
cerr << "ERROR: sync.run() returned ret=" << ret << std::endl;
return -ret;
}
RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), static_cast<rgw::sal::RadosStore*>(store)->svc()->rados->get_async_processor(), source_zone, nullptr);
- int ret = sync.init();
+ int ret = sync.init(dpp());
if (ret < 0) {
cerr << "ERROR: sync.init() returned ret=" << ret << std::endl;
return -ret;
set<string> pending_buckets;
set<string> recovering_buckets;
rgw_data_sync_marker sync_marker;
- ret = sync.read_shard_status(shard_id, pending_buckets, recovering_buckets, &sync_marker,
+ ret = sync.read_shard_status(dpp(), shard_id, pending_buckets, recovering_buckets, &sync_marker,
max_entries_specified ? max_entries : 20);
if (ret < 0 && ret != -ENOENT) {
cerr << "ERROR: sync.read_shard_status() returned ret=" << ret << std::endl;
formatter->close_section();
formatter->flush(cout);
} else {
- ret = sync.read_sync_status(&sync_status);
+ ret = sync.read_sync_status(dpp(), &sync_status);
if (ret < 0 && ret != -ENOENT) {
cerr << "ERROR: sync.read_sync_status() returned ret=" << ret << std::endl;
return -ret;
RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), static_cast<rgw::sal::RadosStore*>(store)->svc()->rados->get_async_processor(), source_zone, nullptr);
- int ret = sync.init();
+ int ret = sync.init(dpp());
if (ret < 0) {
cerr << "ERROR: sync.init() returned ret=" << ret << std::endl;
return -ret;
}
- ret = sync.init_sync_status();
+ ret = sync.init_sync_status(dpp());
if (ret < 0) {
cerr << "ERROR: sync.init_sync_status() returned ret=" << ret << std::endl;
return -ret;
int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->sync_modules->get_manager()->create_instance(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->get_zone().tier_type,
store->get_zone()->get_params().tier_config, &sync_module);
if (ret < 0) {
- lderr(cct) << "ERROR: failed to init sync module instance, ret=" << ret << dendl;
+ ldpp_dout(dpp(), -1) << "ERROR: failed to init sync module instance, ret=" << ret << dendl;
return ret;
}
RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), static_cast<rgw::sal::RadosStore*>(store)->svc()->rados->get_async_processor(), source_zone, nullptr, sync_module);
- ret = sync.init();
+ ret = sync.init(dpp());
if (ret < 0) {
cerr << "ERROR: sync.init() returned ret=" << ret << std::endl;
return -ret;
}
- ret = sync.run();
+ ret = sync.run(dpp());
if (ret < 0) {
cerr << "ERROR: sync.run() returned ret=" << ret << std::endl;
return -ret;
RGWBucketPipeSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), source_zone, opt_sb, bucket->get_key());
- ret = sync.init();
+ ret = sync.init(dpp());
if (ret < 0) {
cerr << "ERROR: sync.init() returned ret=" << ret << std::endl;
return -ret;
}
- ret = sync.init_sync_status();
+ ret = sync.init_sync_status(dpp());
if (ret < 0) {
cerr << "ERROR: sync.init_sync_status() returned ret=" << ret << std::endl;
return -ret;
opt_source_zone, opt_source_bucket,
opt_retry_delay_ms, timeout_at);
if (ret < 0) {
- lderr(store->ctx()) << "bucket sync checkpoint failed: " << cpp_strerror(ret) << dendl;
+ ldpp_dout(dpp(), -1) << "bucket sync checkpoint failed: " << cpp_strerror(ret) << dendl;
return -ret;
}
}
}
RGWBucketPipeSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), source_zone, opt_source_bucket, bucket->get_key());
- ret = sync.init();
+ ret = sync.init(dpp());
if (ret < 0) {
cerr << "ERROR: sync.init() returned ret=" << ret << std::endl;
return -ret;
}
- ret = sync.read_sync_status();
+ ret = sync.read_sync_status(dpp());
if (ret < 0) {
cerr << "ERROR: sync.read_sync_status() returned ret=" << ret << std::endl;
return -ret;
}
RGWBucketPipeSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), source_zone, opt_source_bucket, bucket->get_key());
- ret = sync.init();
+ ret = sync.init(dpp());
if (ret < 0) {
cerr << "ERROR: sync.init() returned ret=" << ret << std::endl;
return -ret;
}
- ret = sync.run();
+ ret = sync.run(dpp());
if (ret < 0) {
cerr << "ERROR: sync.run() returned ret=" << ret << std::endl;
return -ret;
do {
list<rgw_bi_log_entry> entries;
- ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->bilog_rados->log_list(bucket->get_info(), shard_id, marker, max_entries - count, entries, &truncated);
+ ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->bilog_rados->log_list(dpp(), bucket->get_info(), shard_id, marker, max_entries - count, entries, &truncated);
if (ret < 0) {
cerr << "ERROR: list_bi_log_entries(): " << cpp_strerror(-ret) << std::endl;
return -ret;
do {
list<cls_log_entry> entries;
- ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->timelog.list(oid, {}, {}, max_entries - count, entries, marker, &marker, &truncated,
+ ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->timelog.list(dpp(), oid, {}, {}, max_entries - count, entries, marker, &marker, &truncated,
null_yield);
if (ret == -ENOENT) {
break;
cerr << "ERROR: could not init bucket: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->bilog_rados->log_trim(bucket->get_info(), shard_id, start_marker, end_marker);
+ ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->bilog_rados->log_trim(dpp(), bucket->get_info(), shard_id, start_marker, end_marker);
if (ret < 0) {
cerr << "ERROR: trim_bi_log_entries(): " << cpp_strerror(-ret) << std::endl;
return -ret;
return -ret;
}
map<int, string> markers;
- ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->bilog_rados->get_log_status(bucket->get_info(), shard_id,
+ ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->bilog_rados->get_log_status(dpp(), bucket->get_info(), shard_id,
&markers, null_yield);
if (ret < 0) {
cerr << "ERROR: get_bi_log_status(): " << cpp_strerror(-ret) << std::endl;
cerr << "trim manager init failed with " << cpp_strerror(ret) << std::endl;
return -ret;
}
- ret = crs.run(trim.create_admin_bucket_trim_cr(&http));
+ ret = crs.run(dpp(), trim.create_admin_bucket_trim_cr(&http));
if (ret < 0) {
cerr << "automated bilog trim failed with " << cpp_strerror(ret) << std::endl;
return -ret;
do {
std::vector<rgw_data_change_log_entry> entries;
if (specified_shard_id) {
- ret = datalog_svc->list_entries(shard_id, max_entries - count,
+ ret = datalog_svc->list_entries(dpp(), shard_id, max_entries - count,
entries,
marker.empty() ?
std::nullopt :
std::make_optional(marker),
&marker, &truncated);
} else {
- ret = datalog_svc->list_entries(max_entries - count, entries,
+ ret = datalog_svc->list_entries(dpp(), max_entries - count, entries,
log_marker, &truncated);
}
if (ret < 0) {
list<cls_log_entry> entries;
RGWDataChangesLogInfo info;
- static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados->get_info(i, &info);
+ static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados->get_info(dpp(), i, &info);
::encode_json("info", info, formatter.get());
auto num_shards = g_conf()->rgw_data_log_num_shards;
std::vector<std::string> markers(num_shards);
- ret = crs.run(create_admin_data_log_trim_cr(static_cast<rgw::sal::RadosStore*>(store), &http, num_shards, markers));
+ ret = crs.run(dpp(), create_admin_data_log_trim_cr(dpp(), static_cast<rgw::sal::RadosStore*>(store), &http, num_shards, markers));
if (ret < 0) {
cerr << "automated datalog trim failed with " << cpp_strerror(ret) << std::endl;
return -ret;
// loop until -ENODATA
do {
auto datalog = static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados;
- ret = datalog->trim_entries(shard_id, marker);
+ ret = datalog->trim_entries(dpp(), shard_id, marker);
} while (ret == 0);
if (ret < 0 && ret != -ENODATA) {
return -EINVAL;
}
auto datalog = static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados;
- ret = datalog->change_format(*opt_log_type, null_yield);
+ ret = datalog->change_format(dpp(), *opt_log_type, null_yield);
if (ret < 0) {
cerr << "ERROR: change_format(): " << cpp_strerror(-ret) << std::endl;
return -ret;
if (opt_cmd == OPT::DATALOG_PRUNE) {
auto datalog = static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados;
std::optional<uint64_t> through;
- ret = datalog->trim_generations(through);
+ ret = datalog->trim_generations(dpp(), through);
if (ret < 0) {
cerr << "ERROR: trim_generations(): " << cpp_strerror(-ret) << std::endl;
null_yield, dpp(),
MDLOG_STATUS_WRITE,
[&] {
- return static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.create_mfa(user->get_id(), config, &objv_tracker, mtime, null_yield);
+ return static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.create_mfa(dpp(), user->get_id(), config, &objv_tracker, mtime, null_yield);
});
if (ret < 0) {
cerr << "MFA creation failed, error: " << cpp_strerror(-ret) << std::endl;
null_yield, dpp(),
MDLOG_STATUS_WRITE,
[&] {
- return static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.remove_mfa(user->get_id(), totp_serial, &objv_tracker, mtime, null_yield);
+ return static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.remove_mfa(dpp(), user->get_id(), totp_serial, &objv_tracker, mtime, null_yield);
});
if (ret < 0) {
cerr << "MFA removal failed, error: " << cpp_strerror(-ret) << std::endl;
}
rados::cls::otp::otp_info_t result;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.get_mfa(user->get_id(), totp_serial, &result, null_yield);
+ int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.get_mfa(dpp(), user->get_id(), totp_serial, &result, null_yield);
if (ret < 0) {
if (ret == -ENOENT || ret == -ENODATA) {
cerr << "MFA serial id not found" << std::endl;
}
list<rados::cls::otp::otp_info_t> result;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.list_mfa(user->get_id(), &result, null_yield);
+ int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.list_mfa(dpp(), user->get_id(), &result, null_yield);
if (ret < 0) {
cerr << "MFA listing failed, error: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
list<rados::cls::otp::otp_info_t> result;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.check_mfa(user->get_id(), totp_serial, totp_pin.front(), null_yield);
+ int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.check_mfa(dpp(), user->get_id(), totp_serial, totp_pin.front(), null_yield);
if (ret < 0) {
cerr << "MFA check failed, error: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
rados::cls::otp::otp_info_t config;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.get_mfa(user->get_id(), totp_serial, &config, null_yield);
+ int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.get_mfa(dpp(), user->get_id(), totp_serial, &config, null_yield);
if (ret < 0) {
if (ret == -ENOENT || ret == -ENODATA) {
cerr << "MFA serial id not found" << std::endl;
ceph::real_time now;
- ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.otp_get_current_time(user->get_id(), &now, null_yield);
+ ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.otp_get_current_time(dpp(), user->get_id(), &now, null_yield);
if (ret < 0) {
cerr << "ERROR: failed to fetch current time from osd: " << cpp_strerror(-ret) << std::endl;
return -ret;
null_yield, dpp(),
MDLOG_STATUS_WRITE,
[&] {
- return static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.create_mfa(user->get_id(), config, &objv_tracker, mtime, null_yield);
+ return static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.create_mfa(dpp(), user->get_id(), config, &objv_tracker, mtime, null_yield);
});
if (ret < 0) {
cerr << "MFA update failed, error: " << cpp_strerror(-ret) << std::endl;
RGWPubSub ps(static_cast<rgw::sal::RadosStore*>(store), tenant);
- ret = ps.remove_topic(topic_name, null_yield);
+ ret = ps.remove_topic(dpp(), topic_name, null_yield);
if (ret < 0) {
cerr << "ERROR: could not remove topic: " << cpp_strerror(-ret) << std::endl;
return -ret;
RGWPubSub ps(static_cast<rgw::sal::RadosStore*>(store), tenant);
auto sub = ps.get_sub(sub_name);
- ret = sub->unsubscribe(topic_name, null_yield);
+ ret = sub->unsubscribe(dpp(), topic_name, null_yield);
if (ret < 0) {
cerr << "ERROR: could not get subscription info: " << cpp_strerror(-ret) << std::endl;
return -ret;
cerr << "ERROR: lua package name was not provided (via --package)" << std::endl;
return EINVAL;
}
- const auto rc = rgw::lua::add_package(store, null_yield, *script_package, bool(allow_compilation));
+ const auto rc = rgw::lua::add_package(dpp(), store, null_yield, *script_package, bool(allow_compilation));
if (rc < 0) {
cerr << "ERROR: failed to add lua package: " << script_package << " .error: " << rc << std::endl;
return -rc;
cerr << "ERROR: lua package name was not provided (via --package)" << std::endl;
return EINVAL;
}
- const auto rc = rgw::lua::remove_package(store, null_yield, *script_package);
+ const auto rc = rgw::lua::remove_package(dpp(), store, null_yield, *script_package);
if (rc == -ENOENT) {
cerr << "WARNING: package " << script_package << " did not exists or already removed" << std::endl;
return 0;
if (opt_cmd == OPT::SCRIPT_PACKAGE_LIST) {
#ifdef WITH_RADOSGW_LUA_PACKAGES
rgw::lua::packages_t packages;
- const auto rc = rgw::lua::list_packages(store, null_yield, packages);
+ const auto rc = rgw::lua::list_packages(dpp(), store, null_yield, packages);
if (rc == -ENOENT) {
std::cout << "no lua packages in allowlist" << std::endl;
} else if (rc < 0) {
//Check if user_id.buckets already exists, may have been from the time, when shadow users didnt exist
RGWStorageStats stats;
- int ret = user->read_stats(null_yield, &stats);
+ int ret = user->read_stats(dpp, null_yield, &stats);
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: reading stats for the user returned error " << ret << dendl;
return;
* ?get the canonical representation of the object's location
*/
static std::string
-get_canon_resource(const char* const request_uri,
+get_canon_resource(const DoutPrefixProvider *dpp, const char* const request_uri,
const std::map<std::string, std::string>& sub_resources)
{
std::string dest;
}
}
- dout(10) << "get_canon_resource(): dest=" << dest << dendl;
+ ldpp_dout(dpp, 10) << "get_canon_resource(): dest=" << dest << dendl;
return dest;
}
* compute a request's signature
*/
void rgw_create_s3_canonical_header(
+ const DoutPrefixProvider *dpp,
const char* const method,
const char* const content_md5,
const char* const content_type,
dest.append(get_canon_amz_hdr(meta_map));
dest.append(get_canon_amz_hdr(qs_map));
- dest.append(get_canon_resource(request_uri, sub_resources));
+ dest.append(get_canon_resource(dpp, request_uri, sub_resources));
dest_str = dest;
}
* get the header authentication information required to
* compute a request's signature
*/
-bool rgw_create_s3_canonical_header(const req_info& info,
+bool rgw_create_s3_canonical_header(const DoutPrefixProvider *dpp,
+ const req_info& info,
utime_t* const header_time,
std::string& dest,
const bool qsr)
if (content_md5) {
for (const char *p = content_md5; *p; p++) {
if (!is_base64_for_content_md5(*p)) {
- dout(0) << "NOTICE: bad content-md5 provided (not base64),"
+ ldpp_dout(dpp, 0) << "NOTICE: bad content-md5 provided (not base64),"
<< " aborting request p=" << *p << " " << (int)*p << dendl;
return false;
}
if (str == NULL) {
req_date = info.env->get("HTTP_DATE");
if (!req_date) {
- dout(0) << "NOTICE: missing date for auth header" << dendl;
+ ldpp_dout(dpp, 0) << "NOTICE: missing date for auth header" << dendl;
return false;
}
date = req_date;
if (header_time) {
struct tm t;
if (!parse_rfc2616(req_date, &t)) {
- dout(0) << "NOTICE: failed to parse date for auth header" << dendl;
+ ldpp_dout(dpp, 0) << "NOTICE: failed to parse date for auth header" << dendl;
return false;
}
if (t.tm_year < 70) {
- dout(0) << "NOTICE: bad date (predates epoch): " << req_date << dendl;
+ ldpp_dout(dpp, 0) << "NOTICE: bad date (predates epoch): " << req_date << dendl;
return false;
}
*header_time = utime_t(internal_timegm(&t), 0);
request_uri = info.effective_uri;
}
- rgw_create_s3_canonical_header(info.method, content_md5, content_type,
+ rgw_create_s3_canonical_header(dpp, info.method, content_md5, content_type,
date.c_str(), meta_map, qs_map,
request_uri.c_str(), sub_resources, dest);
return true;
} /* namespace rgw */
void rgw_create_s3_canonical_header(
+ const DoutPrefixProvider *dpp,
const char *method,
const char *content_md5,
const char *content_type,
const char *request_uri,
const std::map<std::string, std::string>& sub_resources,
std::string& dest_str);
-bool rgw_create_s3_canonical_header(const req_info& info,
+bool rgw_create_s3_canonical_header(const DoutPrefixProvider *dpp,
+ const req_info& info,
utime_t *header_time, /* out */
std::string& dest, /* out */
bool qsr);
static inline std::tuple<bool, std::string, utime_t>
-rgw_create_s3_canonical_header(const req_info& info, const bool qsr) {
+rgw_create_s3_canonical_header(const DoutPrefixProvider *dpp, const req_info& info, const bool qsr) {
std::string dest;
utime_t header_time;
- const bool ok = rgw_create_s3_canonical_header(info, &header_time, dest, qsr);
+ const bool ok = rgw_create_s3_canonical_header(dpp, info, &header_time, dest, qsr);
return std::make_tuple(ok, dest, header_time);
}
if (ret < 0)
return ret;
- ret = bucket->get_bucket_stats(RGW_NO_SHARD, &bucket_ver, &master_ver, stats, NULL);
+ ret = bucket->get_bucket_stats(dpp, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, NULL);
if (ret < 0)
return ret;
ret = obj->get_obj_state(dpp, &obj_ctx, &astate, y, false);
if (ret == -ENOENT) {
- dout(1) << "WARNING: cannot find obj state for obj " << obj << dendl;
+ ldpp_dout(dpp, 1) << "WARNING: cannot find obj state for obj " << obj << dendl;
continue;
}
if (ret < 0) {
if (astate->manifest) {
RGWObjManifest& manifest = *astate->manifest;
- RGWObjManifest::obj_iterator miter = manifest.obj_begin();
+ RGWObjManifest::obj_iterator miter = manifest.obj_begin(dpp);
std::unique_ptr<rgw::sal::Object> head_obj = bucket->get_object(manifest.get_obj().key);
rgw_raw_obj raw_head_obj;
head_obj->get_raw_obj(&raw_head_obj);
- for (; miter != manifest.obj_end() && max_aio--; ++miter) {
+ for (; miter != manifest.obj_end(dpp) && max_aio--; ++miter) {
if (!max_aio) {
ret = handles->drain();
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl;
return ret;
}
max_aio = concurrent_max;
continue;
}
- ret = store->delete_raw_obj_aio(last_obj, handles.get());
+ ret = store->delete_raw_obj_aio(dpp, last_obj, handles.get());
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: delete obj aio failed with " << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: delete obj aio failed with " << ret << dendl;
return ret;
}
} // for all shadow objs
ret = head_obj->delete_obj_aio(dpp, astate, handles.get(), keep_index_consistent, null_yield);
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: delete obj aio failed with " << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: delete obj aio failed with " << ret << dendl;
return ret;
}
}
if (!max_aio) {
ret = handles->drain();
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl;
return ret;
}
max_aio = concurrent_max;
ret = handles->drain();
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl;
return ret;
}
- bucket->sync_user_stats(y);
+ bucket->sync_user_stats(dpp, y);
if (ret < 0) {
- dout(1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl;
}
RGWObjVersionTracker objv_tracker;
// remain are detritus from a prior bug
ret = bucket->remove_bucket(dpp, true, std::string(), std::string(), false, nullptr, y);
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: could not remove bucket " << bucket << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: could not remove bucket " << bucket << dendl;
return ret;
}
return 0;
}
-bool rgw_find_bucket_by_id(CephContext *cct, rgw::sal::Store* store,
+bool rgw_find_bucket_by_id(const DoutPrefixProvider *dpp, CephContext *cct, rgw::sal::Store* store,
const string& marker, const string& bucket_id, rgw_bucket* bucket_out)
{
void *handle = NULL;
bool truncated = false;
string s;
- int ret = store->meta_list_keys_init("bucket.instance", marker, &handle);
+ int ret = store->meta_list_keys_init(dpp, "bucket.instance", marker, &handle);
if (ret < 0) {
cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl;
store->meta_list_keys_complete(handle);
if (objs_to_unlink.size() > listing_max_entries) {
if (fix_index) {
- int r = bucket->remove_objs_from_index(objs_to_unlink);
+ int r = bucket->remove_objs_from_index(dpp, objs_to_unlink);
if (r < 0) {
set_err_msg(err_msg, "ERROR: remove_obj_from_index() returned error: " +
cpp_strerror(-r));
}
if (fix_index) {
- int r = bucket->remove_objs_from_index(objs_to_unlink);
+ int r = bucket->remove_objs_from_index(dpp, objs_to_unlink);
if (r < 0) {
set_err_msg(err_msg, "ERROR: remove_obj_from_index() returned error: " +
cpp_strerror(-r));
return -EINVAL;
}
- bucket->set_tag_timeout(BUCKET_TAG_TIMEOUT);
+ bucket->set_tag_timeout(dpp, BUCKET_TAG_TIMEOUT);
string prefix;
string empty_delimiter;
formatter->close_section();
- bucket->set_tag_timeout(0);
+ bucket->set_tag_timeout(dpp, 0);
return 0;
}
-int RGWBucket::check_index(RGWBucketAdminOpState& op_state,
+int RGWBucket::check_index(const DoutPrefixProvider *dpp,
+ RGWBucketAdminOpState& op_state,
map<RGWObjCategory, RGWStorageStats>& existing_stats,
map<RGWObjCategory, RGWStorageStats>& calculated_stats,
std::string *err_msg)
{
bool fix_index = op_state.will_fix_index();
- int r = bucket->check_index(existing_stats, calculated_stats);
+ int r = bucket->check_index(dpp, existing_stats, calculated_stats);
if (r < 0) {
set_err_msg(err_msg, "failed to check index error=" + cpp_strerror(-r));
return r;
}
if (fix_index) {
- r = bucket->rebuild_index();
+ r = bucket->rebuild_index(dpp);
if (r < 0) {
set_err_msg(err_msg, "failed to rebuild index err=" + cpp_strerror(-r));
return r;
int shard_id = bucket->get_info().layout.current_index.layout.normal.num_shards? 0 : -1;
if (!sync) {
- r = static_cast<rgw::sal::RadosStore*>(store)->svc()->bilog_rados->log_stop(bucket->get_info(), -1);
+ r = static_cast<rgw::sal::RadosStore*>(store)->svc()->bilog_rados->log_stop(dpp, bucket->get_info(), -1);
if (r < 0) {
set_err_msg(err_msg, "ERROR: failed writing stop bilog:" + cpp_strerror(-r));
return r;
}
} else {
- r = static_cast<rgw::sal::RadosStore*>(store)->svc()->bilog_rados->log_start(bucket->get_info(), -1);
+ r = static_cast<rgw::sal::RadosStore*>(store)->svc()->bilog_rados->log_start(dpp, bucket->get_info(), -1);
if (r < 0) {
set_err_msg(err_msg, "ERROR: failed writing resync bilog:" + cpp_strerror(-r));
return r;
if (ret < 0)
return ret;
- ret = bucket.check_index(op_state, existing_stats, calculated_stats);
+ ret = bucket.check_index(dpp, op_state, existing_stats, calculated_stats);
if (ret < 0)
return ret;
string bucket_ver, master_ver;
string max_marker;
- ret = bucket->get_bucket_stats(RGW_NO_SHARD, &bucket_ver, &master_ver, stats, &max_marker);
+ ret = bucket->get_bucket_stats(dpp, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, &max_marker);
if (ret < 0) {
cerr << "error getting bucket stats bucket=" << bucket->get_name() << " ret=" << ret << std::endl;
return ret;
/* need stats for num_entries */
string bucket_ver, master_ver;
std::map<RGWObjCategory, RGWStorageStats> stats;
- ret = bucket->get_bucket_stats(RGW_NO_SHARD, &bucket_ver, &master_ver, stats, nullptr);
+ ret = bucket->get_bucket_stats(dpp, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, nullptr);
if (ret < 0)
continue;
bool truncated = true;
formatter->open_array_section("buckets");
- ret = store->meta_list_keys_init("bucket", string(), &handle);
+ ret = store->meta_list_keys_init(dpp, "bucket", string(), &handle);
while (ret == 0 && truncated) {
std::list<std::string> buckets;
constexpr int max_keys = 1000;
int r = store->get_bucket(dpp, nullptr, rbucket, &bucket, null_yield);
if (r < 0){
// this can only happen if someone deletes us right when we're processing
- lderr(store->ctx()) << "Bucket instance is invalid: " << bucket_instance
+ ldpp_dout(dpp, -1) << "Bucket instance is invalid: " << bucket_instance
<< cpp_strerror(-r) << dendl;
continue;
}
r = reshard_lock.lock();
if (r < 0) {
// most likely bucket is under reshard, return the sureshot stale instances
- ldout(store->ctx(), 5) << __func__
+ ldpp_dout(dpp, 5) << __func__
<< "failed to take reshard lock; reshard underway likey" << dendl;
return;
}
Formatter *formatter = flusher.get_formatter();
static constexpr auto default_max_keys = 1000;
- int ret = store->meta_list_keys_init("bucket.instance", marker, &handle);
+ int ret = store->meta_list_keys_init(dpp, "bucket.instance", marker, &handle);
if (ret < 0) {
cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl;
return ret;
return ret;
}
- return rgw::lc::fix_lc_shard_entry(store, store->get_rgwlc()->get_lc(), bucket.get());
+ return rgw::lc::fix_lc_shard_entry(dpp, store, store->get_rgwlc()->get_lc(), bucket.get());
}
static void format_lc_status(Formatter* formatter,
process_single_lc_entry(store, formatter, user_id.tenant, bucket_name, dpp);
formatter->flush(cout);
} else {
- int ret = store->meta_list_keys_init("bucket", marker, &handle);
+ int ret = store->meta_list_keys_init(dpp, "bucket", marker, &handle);
if (ret < 0) {
std::cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl;
return ret;
do {
int ret = bucket->list(dpp, params, listing_max_entries, results, null_yield);
if (ret < 0) {
- lderr(store->ctx()) << "ERROR failed to list objects in the bucket" << dendl;
+ ldpp_dout(dpp, -1) << "ERROR failed to list objects in the bucket" << dendl;
return ret;
}
for (const auto& obj : results.objs) {
RGWBucket admin_bucket;
int ret = admin_bucket.init(store, op_state, null_yield, dpp);
if (ret < 0) {
- lderr(store->ctx()) << "failed to initialize bucket" << dendl;
+ ldpp_dout(dpp, -1) << "failed to initialize bucket" << dendl;
return ret;
}
std::unique_ptr<rgw::sal::Bucket> bucket;
*/
ret = ctl.bucket->unlink_bucket(be.owner, be.bucket, y, dpp, false);
if (ret < 0) {
- lderr(svc.bucket->ctx()) << "could not unlink bucket=" << entry << " owner=" << be.owner << dendl;
+ ldpp_dout(dpp, -1) << "could not unlink bucket=" << entry << " owner=" << be.owner << dendl;
}
ret = svc.bucket->remove_bucket_entrypoint_info(ctx, entry, &objv_tracker, y, dpp);
if (ret < 0) {
- lderr(svc.bucket->ctx()) << "could not delete bucket=" << entry << dendl;
+ ldpp_dout(dpp, -1) << "could not delete bucket=" << entry << dendl;
}
/* idempotent */
return 0;
ret = svc.bucket->store_bucket_entrypoint_info(ctx, RGWSI_Bucket::get_entrypoint_meta_key(new_be.bucket),
new_be, true, mtime, &attrs, nullptr, y, dpp);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: failed to put new bucket entrypoint for bucket=" << new_be.bucket << " ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to put new bucket entrypoint for bucket=" << new_be.bucket << " ret=" << ret << dendl;
return ret;
}
ret = ctl.bucket->link_bucket(new_be.owner, new_be.bucket, new_be.creation_time, y, dpp, false);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: failed to link new bucket for bucket=" << new_be.bucket << " ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to link new bucket for bucket=" << new_be.bucket << " ret=" << ret << dendl;
return ret;
}
ret = ctl.bucket->unlink_bucket(be.owner, entry_bucket, y, dpp, false);
if (ret < 0) {
- lderr(cct) << "could not unlink bucket=" << entry << " owner=" << be.owner << dendl;
+ ldpp_dout(dpp, -1) << "could not unlink bucket=" << entry << " owner=" << be.owner << dendl;
}
// if (ret == -ECANCELED) it means that there was a race here, and someone
y,
dpp);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: failed to put new bucket entrypoint for bucket=" << new_be.bucket << " ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to put new bucket entrypoint for bucket=" << new_be.bucket << " ret=" << ret << dendl;
return ret;
}
ret = ctl.bucket->remove_bucket_instance_info(be.bucket, old_bi, y, dpp);
if (ret < 0) {
- lderr(cct) << "could not delete bucket=" << entry << dendl;
+ ldpp_dout(dpp, -1) << "could not delete bucket=" << entry << dendl;
}
obj->get_bucket_info().encode(*bl);
}
- int put_check() override;
+ int put_check(const DoutPrefixProvider *dpp) override;
int put_checked(const DoutPrefixProvider *dpp) override;
int put_post(const DoutPrefixProvider *dpp) override;
};
}
}
-int RGWMetadataHandlerPut_BucketInstance::put_check()
+int RGWMetadataHandlerPut_BucketInstance::put_check(const DoutPrefixProvider *dpp)
{
int ret;
bci.info.bucket.tenant = tenant_name;
// if the sync module never writes data, don't require the zone to specify all placement targets
if (bihandler->svc.zone->sync_module_supports_writes()) {
- ret = bihandler->svc.zone->select_bucket_location_by_rule(bci.info.placement_rule, &rule_info, y);
+ ret = bihandler->svc.zone->select_bucket_location_by_rule(dpp, bci.info.placement_rule, &rule_info, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: select_bucket_placement() returned " << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: select_bucket_placement() returned " << ret << dendl;
return ret;
}
}
objv_tracker = bci.info.objv_tracker;
- int ret = bihandler->svc.bi->init_index(bci.info);
+ int ret = bihandler->svc.bi->init_index(dpp, bci.info);
if (ret < 0) {
return ret;
}
RGWArchiveBucketInstanceMetadataHandler() {}
int do_remove(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWObjVersionTracker& objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) override {
- ldout(cct, 0) << "SKIP: bucket instance removal is not allowed on archive zone: bucket.instance:" << entry << dendl;
+ ldpp_dout(dpp, 0) << "SKIP: bucket instance removal is not allowed on archive zone: bucket.instance:" << entry << dendl;
return 0;
}
};
RGWSI_Bucket::get_entrypoint_meta_key(bucket),
&entry_point, &ot, &ep_mtime, &attrs, y, dpp);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: get_bucket_entrypoint_info() returned " << ret << " bucket=" << bucket << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: get_bucket_entrypoint_info() returned " << ret << " bucket=" << bucket << dendl;
return ret;
}
ret = do_store_linked_bucket_info(ctx, info, nullptr, false, ep_mtime, &ot.write_version, &attrs, true, y, dpp);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: failed to put_linked_bucket_info(): " << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to put_linked_bucket_info(): " << ret << dendl;
return ret;
}
/* an old bucket object, need to convert it */
int ret = convert_old_bucket_info(ctx, bucket, y, dpp);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: failed converting old bucket info: " << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed converting old bucket info: " << ret << dendl;
return ret;
}
}
nullptr, &attrs,
y, dpp);
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << "ERROR: store->get_bucket_entrypoint_info() returned: "
+ ldpp_dout(dpp, 0) << "ERROR: store->get_bucket_entrypoint_info() returned: "
<< cpp_strerror(-ret) << dendl;
}
pattrs = &attrs;
}
}
- ret = ctl.user->add_bucket(user_id, bucket, creation_time, y);
+ ret = ctl.user->add_bucket(dpp, user_id, bucket, creation_time, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: error adding bucket to user directory:"
+ ldpp_dout(dpp, 0) << "ERROR: error adding bucket to user directory:"
<< " user=" << user_id
<< " bucket=" << bucket
<< " err=" << cpp_strerror(-ret)
done_err:
int r = do_unlink_bucket(ctx, user_id, bucket, true, y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed unlinking bucket on error cleanup: "
+ ldpp_dout(dpp, 0) << "ERROR: failed unlinking bucket on error cleanup: "
<< cpp_strerror(-r) << dendl;
}
return ret;
optional_yield y,
const DoutPrefixProvider *dpp)
{
- int ret = ctl.user->remove_bucket(user_id, bucket, y);
+ int ret = ctl.user->remove_bucket(dpp, user_id, bucket, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: error removing bucket from directory: "
<< cpp_strerror(-ret)<< dendl;
results.objs.clear();
int ret = bucket->list(dpp, params, max_entries, results, y);
if (ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: list objects failed: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: list objects failed: " << cpp_strerror(-ret) << dendl;
return ret;
}
ret = r_obj->get_obj_attrs(&obj_ctx, y, dpp);
if (ret < 0){
- ldout(store->ctx(), 0) << "ERROR: failed to read object " << obj.key.name << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to read object " << obj.key.name << cpp_strerror(-ret) << dendl;
continue;
}
const auto& aiter = r_obj->get_attrs().find(RGW_ATTR_ACL);
attrs[RGW_ATTR_ACL] = bl;
ret = r_obj->set_obj_attrs(dpp, &obj_ctx, &attrs, nullptr, y);
if (ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: modify attr failed " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: modify attr failed " << cpp_strerror(-ret) << dendl;
return ret;
}
}
});
}
-int RGWBucketCtl::sync_user_stats(const rgw_user& user_id,
+int RGWBucketCtl::sync_user_stats(const DoutPrefixProvider *dpp,
+ const rgw_user& user_id,
const RGWBucketInfo& bucket_info,
optional_yield y,
RGWBucketEnt* pent)
if (!pent) {
pent = &ent;
}
- int r = svc.bi->read_stats(bucket_info, pent, null_yield);
+ int r = svc.bi->read_stats(dpp, bucket_info, pent, null_yield);
if (r < 0) {
- ldout(cct, 20) << __func__ << "(): failed to read bucket stats (r=" << r << ")" << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): failed to read bucket stats (r=" << r << ")" << dendl;
return r;
}
- return ctl.user->flush_bucket_stats(user_id, *pent, y);
+ return ctl.user->flush_bucket_stats(dpp, user_id, *pent, y);
}
int RGWBucketCtl::get_sync_policy_handler(std::optional<rgw_zone_id> zone,
return svc.bucket_sync->get_policy_handler(ctx, zone, bucket, phandler, y, dpp);
});
if (r < 0) {
- ldout(cct, 20) << __func__ << "(): failed to get policy handler for bucket=" << bucket << " (r=" << r << ")" << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): failed to get policy handler for bucket=" << bucket << " (r=" << r << ")" << dendl;
return r;
}
return 0;
optional_yield y,
std::string *err_msg = NULL);
- int check_index(RGWBucketAdminOpState& op_state,
+ int check_index(const DoutPrefixProvider *dpp,
+ RGWBucketAdminOpState& op_state,
map<RGWObjCategory, RGWStorageStats>& existing_stats,
map<RGWObjCategory, RGWStorageStats>& calculated_stats,
std::string *err_msg = NULL);
const DoutPrefixProvider *dpp);
/* quota related */
- int sync_user_stats(const rgw_user& user_id, const RGWBucketInfo& bucket_info,
+ int sync_user_stats(const DoutPrefixProvider *dpp,
+ const rgw_user& user_id, const RGWBucketInfo& bucket_info,
optional_yield y,
RGWBucketEnt* pent = nullptr);
};
-bool rgw_find_bucket_by_id(CephContext *cct, rgw::sal::Store* store, const string& marker,
+bool rgw_find_bucket_by_id(const DoutPrefixProvider *dpp, CephContext *cct, rgw::sal::Store* store, const string& marker,
const string& bucket_id, rgw_bucket* bucket_out);
#endif
return new RGWBucketSyncPolicyHandler(this, bucket, sync_policy);
}
-int RGWBucketSyncPolicyHandler::init(optional_yield y)
+int RGWBucketSyncPolicyHandler::init(const DoutPrefixProvider *dpp, optional_yield y)
{
- int r = bucket_sync_svc->get_bucket_sync_hints(bucket.value_or(rgw_bucket()),
+ int r = bucket_sync_svc->get_bucket_sync_hints(dpp, bucket.value_or(rgw_bucket()),
&source_hints,
&target_hints,
y);
if (r < 0) {
- ldout(bucket_sync_svc->ctx(), 0) << "ERROR: failed to initialize bucket sync policy handler: get_bucket_sync_hints() on bucket="
+ ldpp_dout(dpp, 0) << "ERROR: failed to initialize bucket sync policy handler: get_bucket_sync_hints() on bucket="
<< bucket << " returned r=" << r << dendl;
return r;
}
RGWBucketSyncPolicyHandler *alloc_child(const rgw_bucket& bucket,
std::optional<rgw_sync_policy_info> sync_policy) const;
- int init(optional_yield y);
+ int init(const DoutPrefixProvider *dpp, optional_yield y);
void reflect(RGWBucketSyncFlowManager::pipe_set *psource_pipes,
RGWBucketSyncFlowManager::pipe_set *ptarget_pipes,
#define dout_subsys ceph_subsys_rgw
-int ObjectCache::get(const string& name, ObjectCacheInfo& info, uint32_t mask, rgw_cache_entry_info *cache_info)
+int ObjectCache::get(const DoutPrefixProvider *dpp, const string& name, ObjectCacheInfo& info, uint32_t mask, rgw_cache_entry_info *cache_info)
{
std::shared_lock rl{lock};
}
auto iter = cache_map.find(name);
if (iter == cache_map.end()) {
- ldout(cct, 10) << "cache get: name=" << name << " : miss" << dendl;
+ ldpp_dout(dpp, 10) << "cache get: name=" << name << " : miss" << dendl;
if (perfcounter) {
perfcounter->inc(l_rgw_cache_miss);
}
if (expiry.count() &&
(ceph::coarse_mono_clock::now() - iter->second.info.time_added) > expiry) {
- ldout(cct, 10) << "cache get: name=" << name << " : expiry miss" << dendl;
+ ldpp_dout(dpp, 10) << "cache get: name=" << name << " : expiry miss" << dendl;
rl.unlock();
std::unique_lock wl{lock}; // write lock for insertion
// check that wasn't already removed by other thread
ObjectCacheEntry *entry = &iter->second;
if (lru_counter - entry->lru_promotion_ts > lru_window) {
- ldout(cct, 20) << "cache get: touching lru, lru_counter=" << lru_counter
+ ldpp_dout(dpp, 20) << "cache get: touching lru, lru_counter=" << lru_counter
<< " promotion_ts=" << entry->lru_promotion_ts << dendl;
rl.unlock();
std::unique_lock wl{lock}; // write lock for insertion
/* need to redo this because entry might have dropped off the cache */
iter = cache_map.find(name);
if (iter == cache_map.end()) {
- ldout(cct, 10) << "lost race! cache get: name=" << name << " : miss" << dendl;
+ ldpp_dout(dpp, 10) << "lost race! cache get: name=" << name << " : miss" << dendl;
if(perfcounter) perfcounter->inc(l_rgw_cache_miss);
return -ENOENT;
}
entry = &iter->second;
/* check again, we might have lost a race here */
if (lru_counter - entry->lru_promotion_ts > lru_window) {
- touch_lru(name, *entry, iter->second.lru_iter);
+ touch_lru(dpp, name, *entry, iter->second.lru_iter);
}
}
ObjectCacheInfo& src = iter->second.info;
if(src.status == -ENOENT) {
- ldout(cct, 10) << "cache get: name=" << name << " : hit (negative entry)" << dendl;
+ ldpp_dout(dpp, 10) << "cache get: name=" << name << " : hit (negative entry)" << dendl;
if (perfcounter) perfcounter->inc(l_rgw_cache_hit);
return -ENODATA;
}
if ((src.flags & mask) != mask) {
- ldout(cct, 10) << "cache get: name=" << name << " : type miss (requested=0x"
+ ldpp_dout(dpp, 10) << "cache get: name=" << name << " : type miss (requested=0x"
<< std::hex << mask << ", cached=0x" << src.flags
<< std::dec << ")" << dendl;
if(perfcounter) perfcounter->inc(l_rgw_cache_miss);
return -ENOENT;
}
- ldout(cct, 10) << "cache get: name=" << name << " : hit (requested=0x"
+ ldpp_dout(dpp, 10) << "cache get: name=" << name << " : hit (requested=0x"
<< std::hex << mask << ", cached=0x" << src.flags
<< std::dec << ")" << dendl;
return 0;
}
-bool ObjectCache::chain_cache_entry(std::initializer_list<rgw_cache_entry_info*> cache_info_entries,
+bool ObjectCache::chain_cache_entry(const DoutPrefixProvider *dpp,
+ std::initializer_list<rgw_cache_entry_info*> cache_info_entries,
RGWChainedCache::Entry *chained_entry)
{
std::unique_lock l{lock};
entries.reserve(cache_info_entries.size());
/* first verify that all entries are still valid */
for (auto cache_info : cache_info_entries) {
- ldout(cct, 10) << "chain_cache_entry: cache_locator="
+ ldpp_dout(dpp, 10) << "chain_cache_entry: cache_locator="
<< cache_info->cache_locator << dendl;
auto iter = cache_map.find(cache_info->cache_locator);
if (iter == cache_map.end()) {
- ldout(cct, 20) << "chain_cache_entry: couldn't find cache locator" << dendl;
+ ldpp_dout(dpp, 20) << "chain_cache_entry: couldn't find cache locator" << dendl;
return false;
}
auto entry = &iter->second;
if (entry->gen != cache_info->gen) {
- ldout(cct, 20) << "chain_cache_entry: entry.gen (" << entry->gen
+ ldpp_dout(dpp, 20) << "chain_cache_entry: entry.gen (" << entry->gen
<< ") != cache_info.gen (" << cache_info->gen << ")"
<< dendl;
return false;
return true;
}
-void ObjectCache::put(const string& name, ObjectCacheInfo& info, rgw_cache_entry_info *cache_info)
+void ObjectCache::put(const DoutPrefixProvider *dpp, const string& name, ObjectCacheInfo& info, rgw_cache_entry_info *cache_info)
{
std::unique_lock l{lock};
return;
}
- ldout(cct, 10) << "cache put: name=" << name << " info.flags=0x"
+ ldpp_dout(dpp, 10) << "cache put: name=" << name << " info.flags=0x"
<< std::hex << info.flags << std::dec << dendl;
auto [iter, inserted] = cache_map.emplace(name, ObjectCacheEntry{});
entry.chained_entries.clear();
entry.gen++;
- touch_lru(name, entry, entry.lru_iter);
+ touch_lru(dpp, name, entry, entry.lru_iter);
target.status = info.status;
target.xattrs = info.xattrs;
map<string, bufferlist>::iterator iter;
for (iter = target.xattrs.begin(); iter != target.xattrs.end(); ++iter) {
- ldout(cct, 10) << "updating xattr: name=" << iter->first << " bl.length()=" << iter->second.length() << dendl;
+ ldpp_dout(dpp, 10) << "updating xattr: name=" << iter->first << " bl.length()=" << iter->second.length() << dendl;
}
} else if (info.flags & CACHE_FLAG_MODIFY_XATTRS) {
map<string, bufferlist>::iterator iter;
for (iter = info.rm_xattrs.begin(); iter != info.rm_xattrs.end(); ++iter) {
- ldout(cct, 10) << "removing xattr: name=" << iter->first << dendl;
+ ldpp_dout(dpp, 10) << "removing xattr: name=" << iter->first << dendl;
target.xattrs.erase(iter->first);
}
for (iter = info.xattrs.begin(); iter != info.xattrs.end(); ++iter) {
- ldout(cct, 10) << "appending xattr: name=" << iter->first << " bl.length()=" << iter->second.length() << dendl;
+ ldpp_dout(dpp, 10) << "appending xattr: name=" << iter->first << " bl.length()=" << iter->second.length() << dendl;
target.xattrs[iter->first] = iter->second;
}
}
target.version = info.version;
}
-bool ObjectCache::remove(const string& name)
+bool ObjectCache::remove(const DoutPrefixProvider *dpp, const string& name)
{
std::unique_lock l{lock};
if (iter == cache_map.end())
return false;
- ldout(cct, 10) << "removing " << name << " from cache" << dendl;
+ ldpp_dout(dpp, 10) << "removing " << name << " from cache" << dendl;
ObjectCacheEntry& entry = iter->second;
for (auto& kv : entry.chained_entries) {
return true;
}
-void ObjectCache::touch_lru(const string& name, ObjectCacheEntry& entry,
+void ObjectCache::touch_lru(const DoutPrefixProvider *dpp, const string& name, ObjectCacheEntry& entry,
std::list<string>::iterator& lru_iter)
{
while (lru_size > (size_t)cct->_conf->rgw_cache_lru_size) {
lru.push_back(name);
lru_size++;
lru_iter--;
- ldout(cct, 10) << "adding " << name << " to cache LRU end" << dendl;
+ ldpp_dout(dpp, 10) << "adding " << name << " to cache LRU end" << dendl;
} else {
- ldout(cct, 10) << "moving " << name << " to cache LRU end" << dendl;
+ ldpp_dout(dpp, 10) << "moving " << name << " to cache LRU end" << dendl;
lru.erase(lru_iter);
lru.push_back(name);
lru_iter = lru.end();
bool enabled;
ceph::timespan expiry;
- void touch_lru(const string& name, ObjectCacheEntry& entry,
+ void touch_lru(const DoutPrefixProvider *dpp, const string& name, ObjectCacheEntry& entry,
std::list<string>::iterator& lru_iter);
void remove_lru(const string& name, std::list<string>::iterator& lru_iter);
void invalidate_lru(ObjectCacheEntry& entry);
public:
ObjectCache() : lru_size(0), lru_counter(0), lru_window(0), cct(NULL), enabled(false) { }
~ObjectCache();
- int get(const std::string& name, ObjectCacheInfo& bl, uint32_t mask, rgw_cache_entry_info *cache_info);
- std::optional<ObjectCacheInfo> get(const std::string& name) {
+ int get(const DoutPrefixProvider *dpp, const std::string& name, ObjectCacheInfo& bl, uint32_t mask, rgw_cache_entry_info *cache_info);
+ std::optional<ObjectCacheInfo> get(const DoutPrefixProvider *dpp, const std::string& name) {
std::optional<ObjectCacheInfo> info{std::in_place};
- auto r = get(name, *info, 0, nullptr);
+ auto r = get(dpp, name, *info, 0, nullptr);
return r < 0 ? std::nullopt : info;
}
}
}
- void put(const std::string& name, ObjectCacheInfo& bl, rgw_cache_entry_info *cache_info);
- bool remove(const std::string& name);
+ void put(const DoutPrefixProvider *dpp, const std::string& name, ObjectCacheInfo& bl, rgw_cache_entry_info *cache_info);
+ bool remove(const DoutPrefixProvider *dpp, const std::string& name);
void set_ctx(CephContext *_cct) {
cct = _cct;
lru_window = cct->_conf->rgw_cache_lru_size / 2;
expiry = std::chrono::seconds(cct->_conf.get_val<uint64_t>(
"rgw_cache_expiry_interval"));
}
- bool chain_cache_entry(std::initializer_list<rgw_cache_entry_info*> cache_info_entries,
+ bool chain_cache_entry(const DoutPrefixProvider *dpp,
+ std::initializer_list<rgw_cache_entry_info*> cache_info_entries,
RGWChainedCache::Entry *chained_entry);
void set_enabled(bool status);
}
}
-int RGWCoroutinesStack::operate(RGWCoroutinesEnv *_env)
+int RGWCoroutinesStack::operate(const DoutPrefixProvider *dpp, RGWCoroutinesEnv *_env)
{
env = _env;
RGWCoroutine *op = *pos;
op->stack = this;
- ldout(cct, 20) << *op << ": operate()" << dendl;
- int r = op->operate_wrapper();
+ ldpp_dout(dpp, 20) << *op << ": operate()" << dendl;
+ int r = op->operate_wrapper(dpp);
if (r < 0) {
- ldout(cct, 20) << *op << ": operate() returned r=" << r << dendl;
+ ldpp_dout(dpp, 20) << *op << ": operate() returned r=" << r << dendl;
}
error_flag = op->is_error();
cr->io_complete(io_id);
}
-int RGWCoroutinesManager::run(list<RGWCoroutinesStack *>& stacks)
+int RGWCoroutinesManager::run(const DoutPrefixProvider *dpp, list<RGWCoroutinesStack *>& stacks)
{
int ret = 0;
int blocked_count = 0;
lock.unlock();
- ret = stack->operate(&env);
+ ret = stack->operate(dpp, &env);
lock.lock();
stack->set_is_scheduled(false);
if (ret < 0) {
- ldout(cct, 20) << "stack->operate() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 20) << "stack->operate() returned ret=" << ret << dendl;
}
if (stack->is_error()) {
return ret;
}
-int RGWCoroutinesManager::run(RGWCoroutine *op)
+int RGWCoroutinesManager::run(const DoutPrefixProvider *dpp, RGWCoroutine *op)
{
if (!op) {
return 0;
stacks.push_back(stack);
- int r = run(stacks);
+ int r = run(dpp, stacks);
if (r < 0) {
- ldout(cct, 20) << "run(stacks) returned r=" << r << dendl;
+ ldpp_dout(dpp, 20) << "run(stacks) returned r=" << r << dendl;
} else {
r = op->get_ret_status();
}
request_cleanup();
}
-int RGWSimpleCoroutine::operate()
+int RGWSimpleCoroutine::operate(const DoutPrefixProvider *dpp)
{
int ret = 0;
reenter(this) {
yield return state_init();
- yield return state_send_request();
+ yield return state_send_request(dpp);
yield return state_request_complete();
yield return state_all_complete();
drain_all();
return 0;
}
-int RGWSimpleCoroutine::state_send_request()
+int RGWSimpleCoroutine::state_send_request(const DoutPrefixProvider *dpp)
{
- int ret = send_request();
+ int ret = send_request(dpp);
if (ret < 0) {
call_cleanup();
return set_state(RGWCoroutine_Error, ret);
return status;
}
- virtual int operate_wrapper() {
- return operate();
+ virtual int operate_wrapper(const DoutPrefixProvider *dpp) {
+ return operate(dpp);
}
public:
RGWCoroutine(CephContext *_cct) : status(_cct), _yield_ret(false), cct(_cct), stack(NULL), retcode(0), state(RGWCoroutine_Run) {}
~RGWCoroutine() override;
- virtual int operate() = 0;
+ virtual int operate(const DoutPrefixProvider *dpp) = 0;
bool is_done() { return (state == RGWCoroutine_Done || state == RGWCoroutine_Error); }
bool is_error() { return (state == RGWCoroutine_Error); }
return id;
}
- int operate(RGWCoroutinesEnv *env);
+ int operate(const DoutPrefixProvider *dpp, RGWCoroutinesEnv *env);
bool is_done() {
return done_flag;
}
}
- int run(list<RGWCoroutinesStack *>& ops);
- int run(RGWCoroutine *op);
+ int run(const DoutPrefixProvider *dpp, list<RGWCoroutinesStack *>& ops);
+ int run(const DoutPrefixProvider *dpp, RGWCoroutine *op);
void stop() {
bool expected = false;
if (going_down.compare_exchange_strong(expected, true)) {
class RGWSimpleCoroutine : public RGWCoroutine {
bool called_cleanup;
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
int state_init();
- int state_send_request();
+ int state_send_request(const DoutPrefixProvider *dpp);
int state_request_complete();
int state_all_complete();
~RGWSimpleCoroutine() override;
virtual int init() { return 0; }
- virtual int send_request() = 0;
+ virtual int send_request(const DoutPrefixProvider *dpp) = 0;
virtual int request_complete() = 0;
virtual int finish() { return 0; }
virtual void request_cleanup() {}
if (obj) {
for( ; obj; obj = iter.get_next()) {
const char *s = obj->get_data().c_str();
- dout(10) << "RGWCORSRule::xml_end, el : " << el << ", data : " << s << dendl;
+ ldpp_dout(dpp, 10) << "RGWCORSRule::xml_end, el : " << el << ", data : " << s << dendl;
if (strcasecmp(s, "GET") == 0) {
allowed_methods |= RGW_CORS_GET;
} else if (strcasecmp(s, "POST") == 0) {
if (xml_id != NULL) {
string data = xml_id->get_data();
if (data.length() > 255) {
- dout(0) << "RGWCORSRule has id of length greater than 255" << dendl;
+ ldpp_dout(dpp, 0) << "RGWCORSRule has id of length greater than 255" << dendl;
return false;
}
- dout(10) << "RGWCORRule id : " << data << dendl;
+ ldpp_dout(dpp, 10) << "RGWCORRule id : " << data << dendl;
id = data;
}
/*Check if there is atleast one AllowedOrigin*/
iter = find("AllowedOrigin");
if (!(obj = iter.get_next())) {
- dout(0) << "RGWCORSRule does not have even one AllowedOrigin" << dendl;
+ ldpp_dout(dpp, 0) << "RGWCORSRule does not have even one AllowedOrigin" << dendl;
return false;
}
for( ; obj; obj = iter.get_next()) {
- dout(10) << "RGWCORSRule - origin : " << obj->get_data() << dendl;
+ ldpp_dout(dpp, 10) << "RGWCORSRule - origin : " << obj->get_data() << dendl;
/*Just take the hostname*/
string host = obj->get_data();
if (validate_name_string(host) != 0)
unsigned long long ull = strtoull(obj->get_data().c_str(), &end, 10);
if (*end != '\0') {
- dout(0) << "RGWCORSRule's MaxAgeSeconds " << obj->get_data() << " is an invalid integer" << dendl;
+ ldpp_dout(dpp, 0) << "RGWCORSRule's MaxAgeSeconds " << obj->get_data() << " is an invalid integer" << dendl;
return false;
}
if (ull >= 0x100000000ull) {
} else {
max_age = (uint32_t)ull;
}
- dout(10) << "RGWCORSRule : max_age : " << max_age << dendl;
+ ldpp_dout(dpp, 10) << "RGWCORSRule : max_age : " << max_age << dendl;
}
/*Check and update ExposeHeader*/
iter = find("ExposeHeader");
if ((obj = iter.get_next())) {
for(; obj; obj = iter.get_next()) {
- dout(10) << "RGWCORSRule - exp_hdr : " << obj->get_data() << dendl;
+ ldpp_dout(dpp, 10) << "RGWCORSRule - exp_hdr : " << obj->get_data() << dendl;
exposable_hdrs.push_back(obj->get_data());
}
}
iter = find("AllowedHeader");
if ((obj = iter.get_next())) {
for(; obj; obj = iter.get_next()) {
- dout(10) << "RGWCORSRule - allowed_hdr : " << obj->get_data() << dendl;
+ ldpp_dout(dpp, 10) << "RGWCORSRule - allowed_hdr : " << obj->get_data() << dendl;
string s = obj->get_data();
if (validate_name_string(s) != 0)
return false;
XMLObjIter iter = find("CORSRule");
RGWCORSRule_S3 *obj;
if (!(obj = static_cast<RGWCORSRule_S3 *>(iter.get_next()))) {
- dout(0) << "CORSConfiguration should have atleast one CORSRule" << dendl;
+ ldpp_dout(dpp, 0) << "CORSConfiguration should have atleast one CORSRule" << dendl;
return false;
}
for(; obj; obj = static_cast<RGWCORSRule_S3 *>(iter.get_next())) {
XMLObj *RGWCORSXMLParser_S3::alloc_obj(const char *el) {
if (strcmp(el, "CORSConfiguration") == 0) {
- return new RGWCORSConfiguration_S3;
+ return new RGWCORSConfiguration_S3(dpp);
} else if (strcmp(el, "CORSRule") == 0) {
- return new RGWCORSRule_S3;
+ return new RGWCORSRule_S3(dpp);
} else if (strcmp(el, "ID") == 0) {
return new CORSRuleID_S3;
} else if (strcmp(el, "AllowedOrigin") == 0) {
#include <include/types.h>
#include <common/Formatter.h>
+#include <common/dout.h>
#include "rgw_xml.h"
#include "rgw_cors.h"
class RGWCORSRule_S3 : public RGWCORSRule, public XMLObj
{
+ const DoutPrefixProvider *dpp;
public:
- RGWCORSRule_S3() {}
+ RGWCORSRule_S3(const DoutPrefixProvider *dpp) : dpp(dpp) {}
~RGWCORSRule_S3() override {}
bool xml_end(const char *el) override;
class RGWCORSConfiguration_S3 : public RGWCORSConfiguration, public XMLObj
{
+ const DoutPrefixProvider *dpp;
public:
- RGWCORSConfiguration_S3() {}
+ RGWCORSConfiguration_S3(const DoutPrefixProvider *dpp) : dpp(dpp) {}
~RGWCORSConfiguration_S3() override {}
bool xml_end(const char *el) override;
class RGWCORSXMLParser_S3 : public RGWXMLParser
{
+ const DoutPrefixProvider *dpp;
CephContext *cct;
XMLObj *alloc_obj(const char *el) override;
public:
- explicit RGWCORSXMLParser_S3(CephContext *_cct) : cct(_cct) {}
+ explicit RGWCORSXMLParser_S3(const DoutPrefixProvider *_dpp, CephContext *_cct) : dpp(_dpp), cct(_cct) {}
};
#endif /*CEPH_RGW_CORS_S3_H*/
}
void RGWAsyncRadosProcessor::RGWWQ::_process(RGWAsyncRadosRequest *req, ThreadPool::TPHandle& handle) {
- processor->handle_request(req);
+ processor->handle_request(this, req);
processor->req_throttle.put(1);
}
}
}
-void RGWAsyncRadosProcessor::handle_request(RGWAsyncRadosRequest *req) {
- req->send_request();
+void RGWAsyncRadosProcessor::handle_request(const DoutPrefixProvider *dpp, RGWAsyncRadosRequest *req) {
+ req->send_request(dpp);
req->put();
}
req_wq.queue(req);
}
-int RGWAsyncGetSystemObj::_send_request()
+int RGWAsyncGetSystemObj::_send_request(const DoutPrefixProvider *dpp)
{
map<string, bufferlist> *pattrs = want_attrs ? &attrs : nullptr;
.set_objv_tracker(&objv_tracker)
.set_attrs(pattrs)
.set_raw_attrs(raw_attrs)
- .read(&bl, null_yield);
+ .read(dpp, &bl, null_yield);
}
-RGWAsyncGetSystemObj::RGWAsyncGetSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc,
+RGWAsyncGetSystemObj::RGWAsyncGetSystemObj(const DoutPrefixProvider *_dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc,
RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
bool want_attrs, bool raw_attrs)
- : RGWAsyncRadosRequest(caller, cn), obj_ctx(_svc),
+ : RGWAsyncRadosRequest(caller, cn), dpp(_dpp), obj_ctx(_svc),
obj(_obj), want_attrs(want_attrs), raw_attrs(raw_attrs)
{
if (_objv_tracker) {
}
}
-int RGWSimpleRadosReadAttrsCR::send_request()
+int RGWSimpleRadosReadAttrsCR::send_request(const DoutPrefixProvider *dpp)
{
- req = new RGWAsyncGetSystemObj(this, stack->create_completion_notifier(),
+ req = new RGWAsyncGetSystemObj(dpp, this, stack->create_completion_notifier(),
svc, objv_tracker, obj, true, raw_attrs);
async_rados->queue(req);
return 0;
return req->get_ret_status();
}
-int RGWAsyncPutSystemObj::_send_request()
+int RGWAsyncPutSystemObj::_send_request(const DoutPrefixProvider *dpp)
{
auto obj_ctx = svc->init_obj_ctx();
auto sysobj = obj_ctx.get_obj(obj);
return sysobj.wop()
.set_objv_tracker(&objv_tracker)
.set_exclusive(exclusive)
- .write_data(bl, null_yield);
+ .write_data(dpp, bl, null_yield);
}
-RGWAsyncPutSystemObj::RGWAsyncPutSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn,
+RGWAsyncPutSystemObj::RGWAsyncPutSystemObj(const DoutPrefixProvider *_dpp,
+ RGWCoroutine *caller,
+ RGWAioCompletionNotifier *cn,
RGWSI_SysObj *_svc,
RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
bool _exclusive, bufferlist _bl)
- : RGWAsyncRadosRequest(caller, cn), svc(_svc),
+ : RGWAsyncRadosRequest(caller, cn), dpp(_dpp), svc(_svc),
obj(_obj), exclusive(_exclusive), bl(std::move(_bl))
{
if (_objv_tracker) {
}
}
-int RGWAsyncPutSystemObjAttrs::_send_request()
+int RGWAsyncPutSystemObjAttrs::_send_request(const DoutPrefixProvider *dpp)
{
auto obj_ctx = svc->init_obj_ctx();
auto sysobj = obj_ctx.get_obj(obj);
.set_objv_tracker(&objv_tracker)
.set_exclusive(false)
.set_attrs(attrs)
- .write_attrs(null_yield);
+ .write_attrs(dpp, null_yield);
}
-RGWAsyncPutSystemObjAttrs::RGWAsyncPutSystemObjAttrs(RGWCoroutine *caller, RGWAioCompletionNotifier *cn,
+RGWAsyncPutSystemObjAttrs::RGWAsyncPutSystemObjAttrs(const DoutPrefixProvider *_dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn,
RGWSI_SysObj *_svc,
RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
map<string, bufferlist> _attrs)
- : RGWAsyncRadosRequest(caller, cn), svc(_svc),
+ : RGWAsyncRadosRequest(caller, cn), dpp(_dpp), svc(_svc),
obj(_obj), attrs(std::move(_attrs))
{
if (_objv_tracker) {
{
}
-int RGWAsyncLockSystemObj::_send_request()
+int RGWAsyncLockSystemObj::_send_request(const DoutPrefixProvider *dpp)
{
rgw_rados_ref ref;
- int r = store->getRados()->get_raw_obj_ref(obj, &ref);
+ int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
return r;
}
{
}
-int RGWAsyncUnlockSystemObj::_send_request()
+int RGWAsyncUnlockSystemObj::_send_request(const DoutPrefixProvider *dpp)
{
rgw_rados_ref ref;
- int r = store->getRados()->get_raw_obj_ref(obj, &ref);
+ int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
return r;
}
s << "]";
}
-int RGWRadosSetOmapKeysCR::send_request()
+int RGWRadosSetOmapKeysCR::send_request(const DoutPrefixProvider *dpp)
{
- int r = store->getRados()->get_raw_obj_ref(obj, &ref);
+ int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
return r;
}
set_description() << "get omap keys dest=" << obj << " marker=" << marker;
}
-int RGWRadosGetOmapKeysCR::send_request() {
- int r = store->getRados()->get_raw_obj_ref(obj, &result->ref);
+int RGWRadosGetOmapKeysCR::send_request(const DoutPrefixProvider *dpp) {
+ int r = store->getRados()->get_raw_obj_ref(dpp, obj, &result->ref);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
return r;
}
set_description() << "get omap keys dest=" << obj << " marker=" << marker;
}
-int RGWRadosGetOmapValsCR::send_request() {
- int r = store->getRados()->get_raw_obj_ref(obj, &result->ref);
+int RGWRadosGetOmapValsCR::send_request(const DoutPrefixProvider *dpp) {
+ int r = store->getRados()->get_raw_obj_ref(dpp, obj, &result->ref);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
return r;
}
set_description() << "remove omap keys dest=" << obj << " keys=" << keys;
}
-int RGWRadosRemoveOmapKeysCR::send_request() {
- int r = store->getRados()->get_raw_obj_ref(obj, &ref);
+int RGWRadosRemoveOmapKeysCR::send_request(const DoutPrefixProvider *dpp) {
+ int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
return r;
}
set_description() << "remove dest=" << obj;
}
-int RGWRadosRemoveCR::send_request()
+int RGWRadosRemoveCR::send_request(const DoutPrefixProvider *dpp)
{
auto rados = store->getRados()->get_rados_handle();
int r = rados->ioctx_create(obj.pool.name.c_str(), ioctx);
}
}
-int RGWSimpleRadosLockCR::send_request()
+int RGWSimpleRadosLockCR::send_request(const DoutPrefixProvider *dpp)
{
set_status() << "sending request";
req = new RGWAsyncLockSystemObj(this, stack->create_completion_notifier(),
}
}
-int RGWSimpleRadosUnlockCR::send_request()
+int RGWSimpleRadosUnlockCR::send_request(const DoutPrefixProvider *dpp)
{
set_status() << "sending request";
return req->get_ret_status();
}
-int RGWOmapAppend::operate() {
+int RGWOmapAppend::operate(const DoutPrefixProvider *dpp) {
reenter(this) {
for (;;) {
if (!has_product() && going_down) {
return (!is_done());
}
-int RGWAsyncGetBucketInstanceInfo::_send_request()
+int RGWAsyncGetBucketInstanceInfo::_send_request(const DoutPrefixProvider *dpp)
{
int r;
if (!bucket.bucket_id.empty()) {
return 0;
}
-RGWRadosBILogTrimCR::RGWRadosBILogTrimCR(rgw::sal::RadosStore* store,
+RGWRadosBILogTrimCR::RGWRadosBILogTrimCR(const DoutPrefixProvider *dpp,
+ rgw::sal::RadosStore* store,
const RGWBucketInfo& bucket_info,
int shard_id,
const std::string& start_marker,
start_marker(BucketIndexShardsManager::get_shard_marker(start_marker)),
end_marker(BucketIndexShardsManager::get_shard_marker(end_marker))
{
- bs.init(bucket_info, bucket_info.layout.current_index, shard_id);
+ bs.init(dpp, bucket_info, bucket_info.layout.current_index, shard_id);
}
-int RGWRadosBILogTrimCR::send_request()
+int RGWRadosBILogTrimCR::send_request(const DoutPrefixProvider *dpp)
{
bufferlist in;
cls_rgw_bi_log_trim_op call;
return r;
}
-int RGWAsyncFetchRemoteObj::_send_request()
+int RGWAsyncFetchRemoteObj::_send_request(const DoutPrefixProvider *dpp)
{
RGWObjectCtx obj_ctx(store);
&bytes_transferred);
if (r < 0) {
- ldout(store->ctx(), 0) << "store->fetch_remote_obj() returned r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "store->fetch_remote_obj() returned r=" << r << dendl;
if (counters) {
counters->inc(sync_counters::l_fetch_err, 1);
}
return r;
}
-int RGWAsyncStatRemoteObj::_send_request()
+int RGWAsyncStatRemoteObj::_send_request(const DoutPrefixProvider *dpp)
{
RGWObjectCtx obj_ctx(store);
rgw::sal::RadosBucket bucket(store, src_bucket);
rgw::sal::RadosObject src_obj(store, key, &bucket);
- int r = store->getRados()->stat_remote_obj(obj_ctx,
+ int r = store->getRados()->stat_remote_obj(dpp,
+ obj_ctx,
rgw_user(user_id),
nullptr, /* req_info */
source_zone,
petag); /* string *petag, */
if (r < 0) {
- ldout(store->ctx(), 0) << "store->fetch_remote_obj() returned r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "store->fetch_remote_obj() returned r=" << r << dendl;
}
return r;
}
-int RGWAsyncRemoveObj::_send_request()
+int RGWAsyncRemoveObj::_send_request(const DoutPrefixProvider *dpp)
{
RGWObjectCtx obj_ctx(store);
rgw_obj obj(bucket_info.bucket, key);
- ldout(store->ctx(), 0) << __func__ << "(): deleting obj=" << obj << dendl;
+ ldpp_dout(dpp, 0) << __func__ << "(): deleting obj=" << obj << dendl;
obj_ctx.set_atomic(obj);
return ret;
}
-int RGWContinuousLeaseCR::operate()
+int RGWContinuousLeaseCR::operate(const DoutPrefixProvider *dpp)
{
if (aborted) {
caller->set_sleeping(false);
return 0;
}
-RGWRadosTimelogAddCR::RGWRadosTimelogAddCR(rgw::sal::RadosStore* _store, const string& _oid,
+RGWRadosTimelogAddCR::RGWRadosTimelogAddCR(const DoutPrefixProvider *_dpp, rgw::sal::RadosStore* _store, const string& _oid,
const cls_log_entry& entry) : RGWSimpleCoroutine(_store->ctx()),
+ dpp(_dpp),
store(_store),
oid(_oid), cn(NULL)
{
entries.push_back(entry);
}
-int RGWRadosTimelogAddCR::send_request()
+int RGWRadosTimelogAddCR::send_request(const DoutPrefixProvider *dpp)
{
set_status() << "sending request";
cn = stack->create_completion_notifier();
- return store->svc()->cls->timelog.add(oid, entries, cn->completion(), true, null_yield);
+ return store->svc()->cls->timelog.add(dpp, oid, entries, cn->completion(), true, null_yield);
}
int RGWRadosTimelogAddCR::request_complete()
return r;
}
-RGWRadosTimelogTrimCR::RGWRadosTimelogTrimCR(rgw::sal::RadosStore* store,
+RGWRadosTimelogTrimCR::RGWRadosTimelogTrimCR(const DoutPrefixProvider *dpp,
+ rgw::sal::RadosStore* store,
const std::string& oid,
const real_time& start_time,
const real_time& end_time,
const std::string& from_marker,
const std::string& to_marker)
- : RGWSimpleCoroutine(store->ctx()), store(store), oid(oid),
+ : RGWSimpleCoroutine(store->ctx()), dpp(dpp), store(store), oid(oid),
start_time(start_time), end_time(end_time),
from_marker(from_marker), to_marker(to_marker)
{
<< " from_marker=" << from_marker << " to_marker=" << to_marker;
}
-int RGWRadosTimelogTrimCR::send_request()
+int RGWRadosTimelogTrimCR::send_request(const DoutPrefixProvider *dpp)
{
set_status() << "sending request";
cn = stack->create_completion_notifier();
- return store->svc()->cls->timelog.trim(oid, start_time, end_time, from_marker,
+ return store->svc()->cls->timelog.trim(dpp, oid, start_time, end_time, from_marker,
to_marker, cn->completion(),
null_yield);
}
}
-RGWSyncLogTrimCR::RGWSyncLogTrimCR(rgw::sal::RadosStore* store, const std::string& oid,
+RGWSyncLogTrimCR::RGWSyncLogTrimCR(const DoutPrefixProvider *dpp,
+ rgw::sal::RadosStore* store, const std::string& oid,
const std::string& to_marker,
std::string *last_trim_marker)
- : RGWRadosTimelogTrimCR(store, oid, real_time{}, real_time{},
+ : RGWRadosTimelogTrimCR(dpp, store, oid, real_time{}, real_time{},
std::string{}, to_marker),
cct(store->ctx()), last_trim_marker(last_trim_marker)
{
}
-int RGWAsyncStatObj::_send_request()
+int RGWAsyncStatObj::_send_request(const DoutPrefixProvider *dpp)
{
rgw_raw_obj raw_obj;
store->getRados()->obj_to_raw(bucket_info.placement_rule, obj, &raw_obj);
- return store->getRados()->raw_obj_stat(raw_obj, psize, pmtime, pepoch,
+ return store->getRados()->raw_obj_stat(dpp, raw_obj, psize, pmtime, pepoch,
nullptr, nullptr, objv_tracker, null_yield);
}
-RGWStatObjCR::RGWStatObjCR(RGWAsyncRadosProcessor *async_rados, rgw::sal::RadosStore* store,
+RGWStatObjCR::RGWStatObjCR(const DoutPrefixProvider *dpp,
+ RGWAsyncRadosProcessor *async_rados, rgw::sal::RadosStore* store,
const RGWBucketInfo& _bucket_info, const rgw_obj& obj, uint64_t *psize,
real_time* pmtime, uint64_t *pepoch,
RGWObjVersionTracker *objv_tracker)
- : RGWSimpleCoroutine(store->ctx()), store(store), async_rados(async_rados),
+ : RGWSimpleCoroutine(store->ctx()), dpp(dpp), store(store), async_rados(async_rados),
bucket_info(_bucket_info), obj(obj), psize(psize), pmtime(pmtime), pepoch(pepoch),
objv_tracker(objv_tracker)
{
}
}
-int RGWStatObjCR::send_request()
+int RGWStatObjCR::send_request(const DoutPrefixProvider *dpp)
{
- req = new RGWAsyncStatObj(this, stack->create_completion_notifier(),
+ req = new RGWAsyncStatObj(dpp, this, stack->create_completion_notifier(),
store, bucket_info, obj, psize, pmtime, pepoch, objv_tracker);
async_rados->queue(req);
return 0;
set_description() << "notify dest=" << obj;
}
-int RGWRadosNotifyCR::send_request()
+int RGWRadosNotifyCR::send_request(const DoutPrefixProvider *dpp)
{
- int r = store->getRados()->get_raw_obj_ref(obj, &ref);
+ int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
return r;
}
#include "services/svc_sys_obj.h"
#include "services/svc_bucket.h"
+#define dout_subsys ceph_subsys_rgw
+
class RGWAsyncRadosRequest : public RefCountedObject {
RGWCoroutine *caller;
RGWAioCompletionNotifier *notifier;
ceph::mutex lock = ceph::make_mutex("RGWAsyncRadosRequest::lock");
protected:
- virtual int _send_request() = 0;
+ virtual int _send_request(const DoutPrefixProvider *dpp) = 0;
public:
RGWAsyncRadosRequest(RGWCoroutine *_caller, RGWAioCompletionNotifier *_cn)
: caller(_caller), notifier(_cn), retcode(0) {
}
}
- void send_request() {
+ void send_request(const DoutPrefixProvider *dpp) {
get();
- retcode = _send_request();
+ retcode = _send_request(dpp);
{
std::lock_guard l{lock};
if (notifier) {
ThreadPool m_tp;
Throttle req_throttle;
- struct RGWWQ : public ThreadPool::WorkQueue<RGWAsyncRadosRequest> {
+ struct RGWWQ : public DoutPrefixProvider, public ThreadPool::WorkQueue<RGWAsyncRadosRequest> {
RGWAsyncRadosProcessor *processor;
RGWWQ(RGWAsyncRadosProcessor *p,
ceph::timespan timeout, ceph::timespan suicide_timeout,
void _clear() override {
ceph_assert(processor->m_req_queue.empty());
}
+
+ CephContext *get_cct() const { return processor->cct; }
+ unsigned get_subsys() const { return ceph_subsys_rgw; }
+ std::ostream& gen_prefix(std::ostream& out) const { return out << "rgw async rados processor: ";}
+
} req_wq;
public:
~RGWAsyncRadosProcessor() {}
void start();
void stop();
- void handle_request(RGWAsyncRadosRequest *req);
+ void handle_request(const DoutPrefixProvider *dpp, RGWAsyncRadosRequest *req);
void queue(RGWAsyncRadosRequest *req);
bool is_going_down() {
return going_down;
}
+
};
template <class P>
P params;
const DoutPrefixProvider *dpp;
protected:
- int _send_request() override;
+ int _send_request(const DoutPrefixProvider *dpp) override;
public:
Request(RGWCoroutine *caller,
RGWAioCompletionNotifier *cn,
}
}
- int send_request() override {
+ int send_request(const DoutPrefixProvider *dpp) override {
req = new Request(this,
stack->create_completion_notifier(),
store,
std::shared_ptr<R> result;
const DoutPrefixProvider *dpp;
protected:
- int _send_request() override;
+ int _send_request(const DoutPrefixProvider *dpp) override;
public:
- Request(RGWCoroutine *caller,
+ Request(const DoutPrefixProvider *dpp,
+ RGWCoroutine *caller,
RGWAioCompletionNotifier *cn,
rgw::sal::RadosStore* _store,
const P& _params,
}
}
- int send_request() override {
- req = new Request(this,
+ int send_request(const DoutPrefixProvider *dpp) override {
+ req = new Request(dpp,
+ this,
stack->create_completion_notifier(),
store,
params,
class Request : public RGWAsyncRadosRequest {
std::shared_ptr<Action> action;
protected:
- int _send_request() override {
+ int _send_request(const DoutPrefixProvider *dpp) override {
if (!action) {
return 0;
}
return action->operate();
}
public:
- Request(RGWCoroutine *caller,
+ Request(const DoutPrefixProvider *dpp,
+ RGWCoroutine *caller,
RGWAioCompletionNotifier *cn,
std::shared_ptr<Action>& _action) : RGWAsyncRadosRequest(caller, cn),
action(_action) {}
}
}
- int send_request() override {
- req = new Request(this,
+ int send_request(const DoutPrefixProvider *dpp) override {
+ req = new Request(dpp, this,
stack->create_completion_notifier(),
action);
class RGWAsyncGetSystemObj : public RGWAsyncRadosRequest {
+ const DoutPrefixProvider *dpp;
RGWSysObjectCtx obj_ctx;
rgw_raw_obj obj;
const bool want_attrs;
const bool raw_attrs;
protected:
- int _send_request() override;
+ int _send_request(const DoutPrefixProvider *dpp) override;
public:
- RGWAsyncGetSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc,
+ RGWAsyncGetSystemObj(const DoutPrefixProvider *dpp,
+ RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc,
RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
bool want_attrs, bool raw_attrs);
};
class RGWAsyncPutSystemObj : public RGWAsyncRadosRequest {
+ const DoutPrefixProvider *dpp;
RGWSI_SysObj *svc;
rgw_raw_obj obj;
bool exclusive;
bufferlist bl;
protected:
- int _send_request() override;
+ int _send_request(const DoutPrefixProvider *dpp) override;
public:
- RGWAsyncPutSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc,
+ RGWAsyncPutSystemObj(const DoutPrefixProvider *dpp, RGWCoroutine *caller,
+ RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc,
RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
bool _exclusive, bufferlist _bl);
};
class RGWAsyncPutSystemObjAttrs : public RGWAsyncRadosRequest {
+ const DoutPrefixProvider *dpp;
RGWSI_SysObj *svc;
rgw_raw_obj obj;
map<string, bufferlist> attrs;
protected:
- int _send_request() override;
+ int _send_request(const DoutPrefixProvider *dpp) override;
public:
- RGWAsyncPutSystemObjAttrs(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc,
+ RGWAsyncPutSystemObjAttrs(const DoutPrefixProvider *dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc,
RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
map<string, bufferlist> _attrs);
uint32_t duration_secs;
protected:
- int _send_request() override;
+ int _send_request(const DoutPrefixProvider *dpp) override;
public:
RGWAsyncLockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store,
RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
string cookie;
protected:
- int _send_request() override;
+ int _send_request(const DoutPrefixProvider *dpp) override;
public:
RGWAsyncUnlockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store,
RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
template <class T>
class RGWSimpleRadosReadCR : public RGWSimpleCoroutine {
+ const DoutPrefixProvider *dpp;
RGWAsyncRadosProcessor *async_rados;
RGWSI_SysObj *svc;
RGWAsyncGetSystemObj *req{nullptr};
public:
- RGWSimpleRadosReadCR(RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc,
+ RGWSimpleRadosReadCR(const DoutPrefixProvider *_dpp,
+ RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc,
const rgw_raw_obj& _obj,
T *_result, bool empty_on_enoent = true,
RGWObjVersionTracker *objv_tracker = nullptr)
- : RGWSimpleCoroutine(_svc->ctx()), async_rados(_async_rados), svc(_svc),
+ : RGWSimpleCoroutine(_svc->ctx()), dpp(_dpp), async_rados(_async_rados), svc(_svc),
obj(_obj), result(_result),
empty_on_enoent(empty_on_enoent), objv_tracker(objv_tracker) {}
~RGWSimpleRadosReadCR() override {
}
}
- int send_request() override;
+ int send_request(const DoutPrefixProvider *dpp) override;
int request_complete() override;
virtual int handle_data(T& data) {
};
template <class T>
-int RGWSimpleRadosReadCR<T>::send_request()
+int RGWSimpleRadosReadCR<T>::send_request(const DoutPrefixProvider *dpp)
{
- req = new RGWAsyncGetSystemObj(this, stack->create_completion_notifier(), svc,
+ req = new RGWAsyncGetSystemObj(dpp, this, stack->create_completion_notifier(), svc,
objv_tracker, obj, false, false);
async_rados->queue(req);
return 0;
}
class RGWSimpleRadosReadAttrsCR : public RGWSimpleCoroutine {
+ const DoutPrefixProvider *dpp;
RGWAsyncRadosProcessor *async_rados;
RGWSI_SysObj *svc;
RGWAsyncGetSystemObj *req = nullptr;
public:
- RGWSimpleRadosReadAttrsCR(RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc,
+ RGWSimpleRadosReadAttrsCR(const DoutPrefixProvider *_dpp, RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc,
const rgw_raw_obj& _obj, map<string, bufferlist> *_pattrs,
bool _raw_attrs, RGWObjVersionTracker* objv_tracker = nullptr)
: RGWSimpleCoroutine(_svc->ctx()),
+ dpp(_dpp),
async_rados(_async_rados), svc(_svc),
obj(_obj),
pattrs(_pattrs),
}
}
- int send_request() override;
+ int send_request(const DoutPrefixProvider *dpp) override;
int request_complete() override;
};
template <class T>
class RGWSimpleRadosWriteCR : public RGWSimpleCoroutine {
+ const DoutPrefixProvider *dpp;
RGWAsyncRadosProcessor *async_rados;
RGWSI_SysObj *svc;
bufferlist bl;
RGWAsyncPutSystemObj *req{nullptr};
public:
- RGWSimpleRadosWriteCR(RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc,
+ RGWSimpleRadosWriteCR(const DoutPrefixProvider *_dpp,
+ RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc,
const rgw_raw_obj& _obj,
const T& _data, RGWObjVersionTracker *objv_tracker = nullptr)
- : RGWSimpleCoroutine(_svc->ctx()), async_rados(_async_rados),
+ : RGWSimpleCoroutine(_svc->ctx()), dpp(_dpp), async_rados(_async_rados),
svc(_svc), obj(_obj), objv_tracker(objv_tracker) {
encode(_data, bl);
}
}
}
- int send_request() override {
- req = new RGWAsyncPutSystemObj(this, stack->create_completion_notifier(),
+ int send_request(const DoutPrefixProvider *dpp) override {
+ req = new RGWAsyncPutSystemObj(dpp, this, stack->create_completion_notifier(),
svc, objv_tracker, obj, false, std::move(bl));
async_rados->queue(req);
return 0;
};
class RGWSimpleRadosWriteAttrsCR : public RGWSimpleCoroutine {
+ const DoutPrefixProvider *dpp;
RGWAsyncRadosProcessor *async_rados;
RGWSI_SysObj *svc;
RGWObjVersionTracker *objv_tracker;
RGWAsyncPutSystemObjAttrs *req = nullptr;
public:
- RGWSimpleRadosWriteAttrsCR(RGWAsyncRadosProcessor *_async_rados,
+ RGWSimpleRadosWriteAttrsCR(const DoutPrefixProvider *_dpp,
+ RGWAsyncRadosProcessor *_async_rados,
RGWSI_SysObj *_svc, const rgw_raw_obj& _obj,
map<string, bufferlist> _attrs,
RGWObjVersionTracker *objv_tracker = nullptr)
- : RGWSimpleCoroutine(_svc->ctx()), async_rados(_async_rados),
+ : RGWSimpleCoroutine(_svc->ctx()), dpp(_dpp), async_rados(_async_rados),
svc(_svc), objv_tracker(objv_tracker), obj(_obj),
attrs(std::move(_attrs)) {
}
}
}
- int send_request() override {
- req = new RGWAsyncPutSystemObjAttrs(this, stack->create_completion_notifier(),
+ int send_request(const DoutPrefixProvider *dpp) override {
+ req = new RGWAsyncPutSystemObjAttrs(dpp, this, stack->create_completion_notifier(),
svc, objv_tracker, obj, std::move(attrs));
async_rados->queue(req);
return 0;
const rgw_raw_obj& _obj,
map<string, bufferlist>& _entries);
- int send_request() override;
+ int send_request(const DoutPrefixProvider *dpp) override;
int request_complete() override;
};
const string& _marker, int _max_entries,
ResultPtr result);
- int send_request() override;
+ int send_request(const DoutPrefixProvider *dpp) override;
int request_complete() override;
private:
const string& _marker, int _max_entries,
ResultPtr result);
- int send_request() override;
+ int send_request(const DoutPrefixProvider *dpp) override;
int request_complete() override;
private:
const rgw_raw_obj& _obj,
const set<string>& _keys);
- int send_request() override;
+ int send_request(const DoutPrefixProvider *dpp) override;
int request_complete() override;
};
RGWRadosRemoveCR(rgw::sal::RadosStore* store, const rgw_raw_obj& obj,
RGWObjVersionTracker* objv_tracker = nullptr);
- int send_request() override;
+ int send_request(const DoutPrefixProvider *dpp) override;
int request_complete() override;
};
}
void request_cleanup() override;
- int send_request() override;
+ int send_request(const DoutPrefixProvider *dpp) override;
int request_complete() override;
static std::string gen_random_cookie(CephContext* cct) {
}
void request_cleanup() override;
- int send_request() override;
+ int send_request(const DoutPrefixProvider *dpp) override;
int request_complete() override;
};
RGWOmapAppend(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RadosStore* _store,
const rgw_raw_obj& _obj,
uint64_t _window_size = OMAP_APPEND_MAX_ENTRIES_DEFAULT);
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
void flush_pending();
bool append(const string& s);
bool finish();
const DoutPrefixProvider *dpp;
protected:
- int _send_request() override;
+ int _send_request(const DoutPrefixProvider *dpp) override;
public:
RGWAsyncGetBucketInstanceInfo(RGWCoroutine *caller, RGWAioCompletionNotifier *cn,
rgw::sal::RadosStore* _store, const rgw_bucket& bucket,
}
}
- int send_request() override {
+ int send_request(const DoutPrefixProvider *dpp) override {
req = new RGWAsyncGetBucketInstanceInfo(this, stack->create_completion_notifier(), store, bucket, dpp);
async_rados->queue(req);
return 0;
std::string end_marker;
boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
public:
- RGWRadosBILogTrimCR(rgw::sal::RadosStore* store, const RGWBucketInfo& bucket_info,
+ RGWRadosBILogTrimCR(const DoutPrefixProvider *dpp,
+ rgw::sal::RadosStore* store, const RGWBucketInfo& bucket_info,
int shard_id, const std::string& start_marker,
const std::string& end_marker);
- int send_request() override;
+ int send_request(const DoutPrefixProvider *dpp) override;
int request_complete() override;
};
const DoutPrefixProvider *dpp;
protected:
- int _send_request() override;
+ int _send_request(const DoutPrefixProvider *dpp) override;
public:
RGWAsyncFetchRemoteObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store,
const rgw_zone_id& _source_zone,
}
}
- int send_request() override {
+ int send_request(const DoutPrefixProvider *dpp) override {
req = new RGWAsyncFetchRemoteObj(this, stack->create_completion_notifier(), store,
source_zone, user_id, src_bucket, dest_placement_rule, dest_bucket_info,
key, dest_key, versioned_epoch, copy_if_newer, filter,
map<string, string> *pheaders;
protected:
- int _send_request() override;
+ int _send_request(const DoutPrefixProvider *dpp) override;
public:
RGWAsyncStatRemoteObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store,
const rgw_zone_id& _source_zone,
}
}
- int send_request() override {
+ int send_request(const DoutPrefixProvider *dpp) override {
req = new RGWAsyncStatRemoteObj(this, stack->create_completion_notifier(), store, source_zone,
src_bucket, key, pmtime, psize, petag, pattrs, pheaders);
async_rados->queue(req);
rgw_zone_set zones_trace;
protected:
- int _send_request() override;
+ int _send_request(const DoutPrefixProvider *dpp) override;
public:
RGWAsyncRemoveObj(const DoutPrefixProvider *_dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn,
rgw::sal::RadosStore* _store,
}
}
- int send_request() override {
+ int send_request(const DoutPrefixProvider *dpp) override {
req = new RGWAsyncRemoveObj(dpp, this, stack->create_completion_notifier(), store, source_zone, bucket_info,
key, owner, owner_display_name, versioned, versioned_epoch,
delete_marker, del_if_older, timestamp, zones_trace);
interval(_interval), caller(_caller)
{}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
bool is_locked() const {
return locked;
};
class RGWRadosTimelogAddCR : public RGWSimpleCoroutine {
+ const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* store;
list<cls_log_entry> entries;
boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
public:
- RGWRadosTimelogAddCR(rgw::sal::RadosStore* _store, const string& _oid,
+ RGWRadosTimelogAddCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* _store, const string& _oid,
const cls_log_entry& entry);
- int send_request() override;
+ int send_request(const DoutPrefixProvider *dpp) override;
int request_complete() override;
};
class RGWRadosTimelogTrimCR : public RGWSimpleCoroutine {
+ const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* store;
boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
protected:
std::string to_marker;
public:
- RGWRadosTimelogTrimCR(rgw::sal::RadosStore* store, const std::string& oid,
+ RGWRadosTimelogTrimCR(const DoutPrefixProvider *dpp,
+ rgw::sal::RadosStore* store, const std::string& oid,
const real_time& start_time, const real_time& end_time,
const std::string& from_marker,
const std::string& to_marker);
- int send_request() override;
+ int send_request(const DoutPrefixProvider *dpp) override;
int request_complete() override;
};
public:
static constexpr const char* max_marker = "99999999";
- RGWSyncLogTrimCR(rgw::sal::RadosStore* store, const std::string& oid,
+ RGWSyncLogTrimCR(const DoutPrefixProvider *dpp,
+ rgw::sal::RadosStore* store, const std::string& oid,
const std::string& to_marker, std::string *last_trim_marker);
int request_complete() override;
};
class RGWAsyncStatObj : public RGWAsyncRadosRequest {
+ const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* store;
RGWBucketInfo bucket_info;
rgw_obj obj;
uint64_t *pepoch;
RGWObjVersionTracker *objv_tracker;
protected:
- int _send_request() override;
+ int _send_request(const DoutPrefixProvider *dpp) override;
public:
- RGWAsyncStatObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* store,
+ RGWAsyncStatObj(const DoutPrefixProvider *dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* store,
const RGWBucketInfo& _bucket_info, const rgw_obj& obj, uint64_t *psize = nullptr,
real_time *pmtime = nullptr, uint64_t *pepoch = nullptr,
RGWObjVersionTracker *objv_tracker = nullptr)
- : RGWAsyncRadosRequest(caller, cn), store(store), obj(obj), psize(psize),
+ : RGWAsyncRadosRequest(caller, cn), dpp(dpp), store(store), obj(obj), psize(psize),
pmtime(pmtime), pepoch(pepoch), objv_tracker(objv_tracker) {}
};
class RGWStatObjCR : public RGWSimpleCoroutine {
+ const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* store;
RGWAsyncRadosProcessor *async_rados;
RGWBucketInfo bucket_info;
RGWObjVersionTracker *objv_tracker;
RGWAsyncStatObj *req = nullptr;
public:
- RGWStatObjCR(RGWAsyncRadosProcessor *async_rados, rgw::sal::RadosStore* store,
+ RGWStatObjCR(const DoutPrefixProvider *dpp, RGWAsyncRadosProcessor *async_rados, rgw::sal::RadosStore* store,
const RGWBucketInfo& _bucket_info, const rgw_obj& obj, uint64_t *psize = nullptr,
real_time* pmtime = nullptr, uint64_t *pepoch = nullptr,
RGWObjVersionTracker *objv_tracker = nullptr);
}
void request_cleanup() override;
- int send_request() override;
+ int send_request(const DoutPrefixProvider *dpp) override;
int request_complete() override;
};
bufferlist& request, uint64_t timeout_ms,
bufferlist *response);
- int send_request() override;
+ int send_request(const DoutPrefixProvider *dpp) override;
int request_complete() override;
};
}
}
-int RGWStreamReadHTTPResourceCRF::init()
+int RGWStreamReadHTTPResourceCRF::init(const DoutPrefixProvider *dpp)
{
env->stack->init_new_io(req);
}
}
-void RGWStreamWriteHTTPResourceCRF::send_ready(const rgw_rest_obj& rest_obj)
+void RGWStreamWriteHTTPResourceCRF::send_ready(const DoutPrefixProvider *dpp, const rgw_rest_obj& rest_obj)
{
req->set_send_length(rest_obj.content_len);
for (auto h : rest_obj.attrs) {
in_crf(_in_crf), out_crf(_out_crf) {}
RGWStreamSpliceCR::~RGWStreamSpliceCR() { }
-int RGWStreamSpliceCR::operate() {
+int RGWStreamSpliceCR::operate(const DoutPrefixProvider *dpp) {
reenter(this) {
{
- int ret = in_crf->init();
+ int ret = in_crf->init(dpp);
if (ret < 0) {
return set_cr_error(ret);
}
if (ret < 0) {
return set_cr_error(ret);
}
- out_crf->send_ready(in_crf->get_rest_obj());
+ out_crf->send_ready(dpp, in_crf->get_rest_obj());
ret = out_crf->send();
if (ret < 0) {
return set_cr_error(ret);
request_cleanup();
}
- int send_request() override {
+ int send_request(const DoutPrefixProvider *dpp) override {
auto op = boost::intrusive_ptr<RGWRESTReadResource>(
new RGWRESTReadResource(conn, path, params, &extra_headers, http_manager));
init_new_io(op.get());
- int ret = op->aio_read();
+ int ret = op->aio_read(dpp);
if (ret < 0) {
log_error() << "failed to send http operation: " << op->to_str()
<< " ret=" << ret << std::endl;
request_cleanup();
}
- int send_request() override {
+ int send_request(const DoutPrefixProvider *dpp) override {
auto op = boost::intrusive_ptr<RGWRESTSendResource>(
new RGWRESTSendResource(conn, method, path, params, &headers, http_manager));
init_new_io(op.get());
- int ret = op->aio_send(input_bl);
+ int ret = op->aio_send(dpp, input_bl);
if (ret < 0) {
- lsubdout(cct, rgw, 0) << "ERROR: failed to send request" << dendl;
+ ldpp_subdout(dpp, rgw, 0) << "ERROR: failed to send request" << dendl;
op->put();
return ret;
}
request_cleanup();
}
- int send_request() override {
+ int send_request(const DoutPrefixProvider *dpp) override {
auto op = boost::intrusive_ptr<RGWRESTDeleteResource>(
new RGWRESTDeleteResource(conn, path, params, nullptr, http_manager));
bufferlist bl;
- int ret = op->aio_send(bl);
+ int ret = op->aio_send(dpp, bl);
if (ret < 0) {
- lsubdout(cct, rgw, 0) << "ERROR: failed to send DELETE request" << dendl;
+ ldpp_subdout(dpp, rgw, 0) << "ERROR: failed to send DELETE request" << dendl;
op->put();
return ret;
}
boost::asio::coroutine read_state;
public:
- virtual int init() = 0;
+ virtual int init(const DoutPrefixProvider *dpp) = 0;
virtual int read(bufferlist *data, uint64_t max, bool *need_retry) = 0; /* reentrant */
virtual int decode_rest_obj(map<string, string>& headers, bufferlist& extra_data) = 0;
virtual bool has_attrs() = 0;
public:
virtual int init() = 0;
- virtual void send_ready(const rgw_rest_obj& rest_obj) = 0;
+ virtual void send_ready(const DoutPrefixProvider *dpp, const rgw_rest_obj& rest_obj) = 0;
virtual int send() = 0;
virtual int write(bufferlist& data, bool *need_retry) = 0; /* reentrant */
virtual int drain_writes(bool *need_retry) = 0; /* reentrant */
}
~RGWStreamReadHTTPResourceCRF();
- int init() override;
+ int init(const DoutPrefixProvider *dpp) override;
int read(bufferlist *data, uint64_t max, bool *need_retry) override; /* reentrant */
int decode_rest_obj(map<string, string>& headers, bufferlist& extra_data) override;
bool has_attrs() override;
int init() override {
return 0;
}
- void send_ready(const rgw_rest_obj& rest_obj) override;
+ void send_ready(const DoutPrefixProvider *dpp, const rgw_rest_obj& rest_obj) override;
int send() override;
int write(bufferlist& data, bool *need_retry) override; /* reentrant */
void write_drain_notify(uint64_t pending_size);
std::shared_ptr<RGWStreamWriteHTTPResourceCRF>& _out_crf);
~RGWStreamSpliceCR();
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
#define dout_subsys ceph_subsys_rgw
template<>
-int RGWUserCreateCR::Request::_send_request()
+int RGWUserCreateCR::Request::_send_request(const DoutPrefixProvider *dpp)
{
CephContext *cct = store->ctx();
}
template<>
-int RGWGetUserInfoCR::Request::_send_request()
+int RGWGetUserInfoCR::Request::_send_request(const DoutPrefixProvider *dpp)
{
return store->ctl()->user->get_info_by_uid(dpp, params.user, result.get(), null_yield);
}
template<>
-int RGWGetBucketInfoCR::Request::_send_request()
+int RGWGetBucketInfoCR::Request::_send_request(const DoutPrefixProvider *dpp)
{
return store->get_bucket(dpp, nullptr, params.tenant, params.bucket_name, &result->bucket, null_yield);
}
template<>
-int RGWBucketCreateLocalCR::Request::_send_request()
+int RGWBucketCreateLocalCR::Request::_send_request(const DoutPrefixProvider *dpp)
{
CephContext *cct = store->ctx();
auto& zone_svc = store->svc()->zone;
if (!placement_rule.empty() &&
!zone_svc->get_zone_params().valid_placement(placement_rule)) {
- ldout(cct, 0) << "placement target (" << placement_rule << ")"
+ ldpp_dout(dpp, 0) << "placement target (" << placement_rule << ")"
<< " doesn't exist in the placement targets of zonegroup"
<< " (" << zone_svc->get_zonegroup().api_name << ")" << dendl;
return -ERR_INVALID_LOCATION_CONSTRAINT;
rgw_bucket bucket;
bucket.tenant = user.tenant;
bucket.name = bucket_name;
- ret = zone_svc->select_bucket_placement(*user_info, zonegroup_id,
+ ret = zone_svc->select_bucket_placement(dpp, *user_info, zonegroup_id,
placement_rule,
&selected_placement_rule, nullptr, null_yield);
if (selected_placement_rule != bucket_info.placement_rule) {
- ldout(cct, 0) << "bucket already exists on a different placement rule: "
+ ldpp_dout(dpp, 0) << "bucket already exists on a different placement rule: "
<< " selected_rule= " << selected_placement_rule
<< " existing_rule= " << bucket_info.placement_rule << dendl;
return -EEXIST;
}
template<>
-int RGWObjectSimplePutCR::Request::_send_request()
+int RGWObjectSimplePutCR::Request::_send_request(const DoutPrefixProvider *dpp)
{
RGWDataAccess::ObjectRef obj;
}
template<>
-int RGWBucketLifecycleConfigCR::Request::_send_request()
+int RGWBucketLifecycleConfigCR::Request::_send_request(const DoutPrefixProvider *dpp)
{
CephContext *cct = store->ctx();
}
template<>
-int RGWBucketGetSyncPolicyHandlerCR::Request::_send_request()
+int RGWBucketGetSyncPolicyHandlerCR::Request::_send_request(const DoutPrefixProvider *dpp)
{
int r = store->ctl()->bucket->get_sync_policy_handler(params.zone,
params.bucket,
RGWGetObj_BlockDecrypt::~RGWGetObj_BlockDecrypt() {
}
-int RGWGetObj_BlockDecrypt::read_manifest(bufferlist& manifest_bl) {
+int RGWGetObj_BlockDecrypt::read_manifest(const DoutPrefixProvider *dpp, bufferlist& manifest_bl) {
parts_len.clear();
RGWObjManifest manifest;
if (manifest_bl.length()) {
try {
decode(manifest, miter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: couldn't decode manifest" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: couldn't decode manifest" << dendl;
return -EIO;
}
RGWObjManifest::obj_iterator mi;
- for (mi = manifest.obj_begin(); mi != manifest.obj_end(); ++mi) {
+ for (mi = manifest.obj_begin(dpp); mi != manifest.obj_end(dpp); ++mi) {
if (mi.get_cur_stripe() == 0) {
parts_len.push_back(0);
}
}
if (cct->_conf->subsys.should_gather<ceph_subsys_rgw, 20>()) {
for (size_t i = 0; i<parts_len.size(); i++) {
- ldout(cct, 20) << "Manifest part " << i << ", size=" << parts_len[i] << dendl;
+ ldpp_dout(dpp, 20) << "Manifest part " << i << ", size=" << parts_len[i] << dendl;
}
}
}
get_crypt_attribute(s->info.env, parts, X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM);
if (! req_sse_ca.empty()) {
if (req_sse_ca != "AES256") {
- ldout(s->cct, 5) << "ERROR: Invalid value for header "
+ ldpp_dout(s, 5) << "ERROR: Invalid value for header "
<< "x-amz-server-side-encryption-customer-algorithm"
<< dendl;
s->err.message = "The requested encryption algorithm is not valid, must be AES256.";
}
if (s->cct->_conf->rgw_crypt_require_ssl &&
!rgw_transport_is_secure(s->cct, *s->info.env)) {
- ldout(s->cct, 5) << "ERROR: Insecure request, rgw_crypt_require_ssl is set" << dendl;
+ ldpp_dout(s, 5) << "ERROR: Insecure request, rgw_crypt_require_ssl is set" << dendl;
return -ERR_INVALID_REQUEST;
}
key_bin = from_base64(
get_crypt_attribute(s->info.env, parts, X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY) );
} catch (...) {
- ldout(s->cct, 5) << "ERROR: rgw_s3_prepare_encrypt invalid encryption "
+ ldpp_dout(s, 5) << "ERROR: rgw_s3_prepare_encrypt invalid encryption "
<< "key which contains character that is not base64 encoded."
<< dendl;
s->err.message = "Requests specifying Server Side Encryption with Customer "
}
if (key_bin.size() != AES_256_CBC::AES_256_KEYSIZE) {
- ldout(s->cct, 5) << "ERROR: invalid encryption key size" << dendl;
+ ldpp_dout(s, 5) << "ERROR: invalid encryption key size" << dendl;
s->err.message = "Requests specifying Server Side Encryption with Customer "
"provided keys must provide an appropriate secret key.";
return -EINVAL;
try {
keymd5_bin = from_base64(keymd5);
} catch (...) {
- ldout(s->cct, 5) << "ERROR: rgw_s3_prepare_encrypt invalid encryption key "
+ ldpp_dout(s, 5) << "ERROR: rgw_s3_prepare_encrypt invalid encryption key "
<< "md5 which contains character that is not base64 encoded."
<< dendl;
s->err.message = "Requests specifying Server Side Encryption with Customer "
}
if (keymd5_bin.size() != CEPH_CRYPTO_MD5_DIGESTSIZE) {
- ldout(s->cct, 5) << "ERROR: Invalid key md5 size" << dendl;
+ ldpp_dout(s, 5) << "ERROR: Invalid key md5 size" << dendl;
s->err.message = "Requests specifying Server Side Encryption with Customer "
"provided keys must provide an appropriate secret key md5.";
return -EINVAL;
key_hash.Final(key_hash_res);
if (memcmp(key_hash_res, keymd5_bin.c_str(), CEPH_CRYPTO_MD5_DIGESTSIZE) != 0) {
- ldout(s->cct, 5) << "ERROR: Invalid key md5 hash" << dendl;
+ ldpp_dout(s, 5) << "ERROR: Invalid key md5 hash" << dendl;
s->err.message = "The calculated MD5 hash of the key did not match the hash that was provided.";
return -EINVAL;
}
std::string_view customer_key =
get_crypt_attribute(s->info.env, parts, X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY);
if (!customer_key.empty()) {
- ldout(s->cct, 5) << "ERROR: SSE-C encryption request is missing the header "
+ ldpp_dout(s, 5) << "ERROR: SSE-C encryption request is missing the header "
<< "x-amz-server-side-encryption-customer-algorithm"
<< dendl;
s->err.message = "Requests specifying Server Side Encryption with Customer "
std::string_view customer_key_md5 =
get_crypt_attribute(s->info.env, parts, X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5);
if (!customer_key_md5.empty()) {
- ldout(s->cct, 5) << "ERROR: SSE-C encryption request is missing the header "
+ ldpp_dout(s, 5) << "ERROR: SSE-C encryption request is missing the header "
<< "x-amz-server-side-encryption-customer-algorithm"
<< dendl;
s->err.message = "Requests specifying Server Side Encryption with Customer "
if (s->cct->_conf->rgw_crypt_require_ssl &&
!rgw_transport_is_secure(s->cct, *s->info.env)) {
- ldout(s->cct, 5) << "ERROR: insecure request, rgw_crypt_require_ssl is set" << dendl;
+ ldpp_dout(s, 5) << "ERROR: insecure request, rgw_crypt_require_ssl is set" << dendl;
return -ERR_INVALID_REQUEST;
}
std::string_view key_id =
get_crypt_attribute(s->info.env, parts, X_AMZ_SERVER_SIDE_ENCRYPTION_AWS_KMS_KEY_ID);
if (key_id.empty()) {
- ldout(s->cct, 5) << "ERROR: not provide a valid key id" << dendl;
+ ldpp_dout(s, 5) << "ERROR: did not provide a valid key id" << dendl;
s->err.message = "Server Side Encryption with KMS managed key requires "
"HTTP header x-amz-server-side-encryption-aws-kms-key-id";
return -ERR_INVALID_ACCESS_KEY;
std::string actual_key;
res = make_actual_key_from_kms(s->cct, attrs, actual_key);
if (res != 0) {
- ldout(s->cct, 5) << "ERROR: failed to retrieve actual key from key_id: " << key_id << dendl;
+ ldpp_dout(s, 5) << "ERROR: failed to retrieve actual key from key_id: " << key_id << dendl;
s->err.message = "Failed to retrieve the actual key, kms-keyid: " + std::string(key_id);
return res;
}
if (actual_key.size() != AES_256_KEYSIZE) {
- ldout(s->cct, 5) << "ERROR: key obtained from key_id:" <<
+ ldpp_dout(s, 5) << "ERROR: key obtained from key_id:" <<
key_id << " is not 256 bit size" << dendl;
s->err.message = "KMS provided an invalid key for the given kms-keyid.";
return -ERR_INVALID_ACCESS_KEY;
} else if (req_sse == "AES256") {
/* if a default encryption key was provided, we will use it for SSE-S3 */
} else {
- ldout(s->cct, 5) << "ERROR: Invalid value for header x-amz-server-side-encryption"
+ ldpp_dout(s, 5) << "ERROR: Invalid value for header x-amz-server-side-encryption"
<< dendl;
s->err.message = "Server Side Encryption with KMS managed key requires "
"HTTP header x-amz-server-side-encryption : aws:kms or AES256";
get_crypt_attribute(s->info.env, parts,
X_AMZ_SERVER_SIDE_ENCRYPTION_AWS_KMS_KEY_ID);
if (!key_id.empty()) {
- ldout(s->cct, 5) << "ERROR: SSE-KMS encryption request is missing the header "
+ ldpp_dout(s, 5) << "ERROR: SSE-KMS encryption request is missing the header "
<< "x-amz-server-side-encryption"
<< dendl;
s->err.message = "Server Side Encryption with KMS managed key requires "
try {
master_encryption_key = from_base64(s->cct->_conf->rgw_crypt_default_encryption_key);
} catch (...) {
- ldout(s->cct, 5) << "ERROR: rgw_s3_prepare_encrypt invalid default encryption key "
+ ldpp_dout(s, 5) << "ERROR: rgw_s3_prepare_encrypt invalid default encryption key "
<< "which contains character that is not base64 encoded."
<< dendl;
s->err.message = "Requests specifying Server Side Encryption with Customer "
}
if (master_encryption_key.size() != 256 / 8) {
- ldout(s->cct, 0) << "ERROR: failed to decode 'rgw crypt default encryption key' to 256 bit string" << dendl;
+ ldpp_dout(s, 0) << "ERROR: failed to decode 'rgw crypt default encryption key' to 256 bit string" << dendl;
/* not an error to return; missing encryption does not inhibit processing */
return 0;
}
{
int res = 0;
std::string stored_mode = get_str_attribute(attrs, RGW_ATTR_CRYPT_MODE);
- ldout(s->cct, 15) << "Encryption mode: " << stored_mode << dendl;
+ ldpp_dout(s, 15) << "Encryption mode: " << stored_mode << dendl;
const char *req_sse = s->info.env->get("HTTP_X_AMZ_SERVER_SIDE_ENCRYPTION", NULL);
if (nullptr != req_sse && (s->op == OP_GET || s->op == OP_HEAD)) {
if (stored_mode == "SSE-C-AES256") {
if (s->cct->_conf->rgw_crypt_require_ssl &&
!rgw_transport_is_secure(s->cct, *s->info.env)) {
- ldout(s->cct, 5) << "ERROR: Insecure request, rgw_crypt_require_ssl is set" << dendl;
+ ldpp_dout(s, 5) << "ERROR: Insecure request, rgw_crypt_require_ssl is set" << dendl;
return -ERR_INVALID_REQUEST;
}
const char *req_cust_alg =
s->info.env->get("HTTP_X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM", NULL);
if (nullptr == req_cust_alg) {
- ldout(s->cct, 5) << "ERROR: Request for SSE-C encrypted object missing "
+ ldpp_dout(s, 5) << "ERROR: Request for SSE-C encrypted object missing "
<< "x-amz-server-side-encryption-customer-algorithm"
<< dendl;
s->err.message = "Requests specifying Server Side Encryption with Customer "
"provided keys must provide a valid encryption algorithm.";
return -EINVAL;
} else if (strcmp(req_cust_alg, "AES256") != 0) {
- ldout(s->cct, 5) << "ERROR: The requested encryption algorithm is not valid, must be AES256." << dendl;
+ ldpp_dout(s, 5) << "ERROR: The requested encryption algorithm is not valid, must be AES256." << dendl;
s->err.message = "The requested encryption algorithm is not valid, must be AES256.";
return -ERR_INVALID_ENCRYPTION_ALGORITHM;
}
try {
key_bin = from_base64(s->info.env->get("HTTP_X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY", ""));
} catch (...) {
- ldout(s->cct, 5) << "ERROR: rgw_s3_prepare_decrypt invalid encryption key "
+ ldpp_dout(s, 5) << "ERROR: rgw_s3_prepare_decrypt invalid encryption key "
<< "which contains character that is not base64 encoded."
<< dendl;
s->err.message = "Requests specifying Server Side Encryption with Customer "
}
if (key_bin.size() != AES_256_CBC::AES_256_KEYSIZE) {
- ldout(s->cct, 5) << "ERROR: Invalid encryption key size" << dendl;
+ ldpp_dout(s, 5) << "ERROR: Invalid encryption key size" << dendl;
s->err.message = "Requests specifying Server Side Encryption with Customer "
"provided keys must provide an appropriate secret key.";
return -EINVAL;
try {
keymd5_bin = from_base64(keymd5);
} catch (...) {
- ldout(s->cct, 5) << "ERROR: rgw_s3_prepare_decrypt invalid encryption key md5 "
+ ldpp_dout(s, 5) << "ERROR: rgw_s3_prepare_decrypt invalid encryption key md5 "
<< "which contains character that is not base64 encoded."
<< dendl;
s->err.message = "Requests specifying Server Side Encryption with Customer "
if (keymd5_bin.size() != CEPH_CRYPTO_MD5_DIGESTSIZE) {
- ldout(s->cct, 5) << "ERROR: Invalid key md5 size " << dendl;
+ ldpp_dout(s, 5) << "ERROR: Invalid key md5 size " << dendl;
s->err.message = "Requests specifying Server Side Encryption with Customer "
"provided keys must provide an appropriate secret key md5.";
return -EINVAL;
if (stored_mode == "SSE-KMS") {
if (s->cct->_conf->rgw_crypt_require_ssl &&
!rgw_transport_is_secure(s->cct, *s->info.env)) {
- ldout(s->cct, 5) << "ERROR: Insecure request, rgw_crypt_require_ssl is set" << dendl;
+ ldpp_dout(s, 5) << "ERROR: Insecure request, rgw_crypt_require_ssl is set" << dendl;
return -ERR_INVALID_REQUEST;
}
/* try to retrieve actual key */
std::string actual_key;
res = reconstitute_actual_key_from_kms(s->cct, attrs, actual_key);
if (res != 0) {
- ldout(s->cct, 10) << "ERROR: failed to retrieve actual key from key_id: " << key_id << dendl;
+ ldpp_dout(s, 10) << "ERROR: failed to retrieve actual key from key_id: " << key_id << dendl;
s->err.message = "Failed to retrieve the actual key, kms-keyid: " + key_id;
return res;
}
if (actual_key.size() != AES_256_KEYSIZE) {
- ldout(s->cct, 0) << "ERROR: key obtained from key_id:" <<
+ ldpp_dout(s, 0) << "ERROR: key obtained from key_id:" <<
key_id << " is not 256 bit size" << dendl;
s->err.message = "KMS provided an invalid key for the given kms-keyid.";
return -ERR_INVALID_ACCESS_KEY;
try {
master_encryption_key = from_base64(std::string(s->cct->_conf->rgw_crypt_default_encryption_key));
} catch (...) {
- ldout(s->cct, 5) << "ERROR: rgw_s3_prepare_decrypt invalid default encryption key "
+ ldpp_dout(s, 5) << "ERROR: rgw_s3_prepare_decrypt invalid default encryption key "
<< "which contains character that is not base64 encoded."
<< dendl;
s->err.message = "The default encryption key is not valid base64.";
}
if (master_encryption_key.size() != 256 / 8) {
- ldout(s->cct, 0) << "ERROR: failed to decode 'rgw crypt default encryption key' to 256 bit string" << dendl;
+ ldpp_dout(s, 0) << "ERROR: failed to decode 'rgw crypt default encryption key' to 256 bit string" << dendl;
return -EIO;
}
std::string attr_key_selector = get_str_attribute(attrs, RGW_ATTR_CRYPT_KEYSEL);
if (attr_key_selector.size() != AES_256_CBC::AES_256_KEYSIZE) {
- ldout(s->cct, 0) << "ERROR: missing or invalid " RGW_ATTR_CRYPT_KEYSEL << dendl;
+ ldpp_dout(s, 0) << "ERROR: missing or invalid " RGW_ATTR_CRYPT_KEYSEL << dendl;
return -EIO;
}
uint8_t actual_key[AES_256_KEYSIZE];
off_t bl_len) override;
virtual int flush() override;
- int read_manifest(bufferlist& manifest_bl);
+ int read_manifest(const DoutPrefixProvider *dpp, bufferlist& manifest_bl);
}; /* RGWGetObj_BlockDecrypt */
return false;
}
using CR = RGWSimpleRadosReadCR<rgw_data_sync_marker>;
- spawn(new CR(env->async_rados, env->svc->sysobj,
+ spawn(new CR(env->dpp, env->async_rados, env->svc->sysobj,
rgw_raw_obj(env->svc->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id)),
&markers[shard_id]),
false);
rgw_data_sync_status *_status)
: RGWCoroutine(_sc->cct), sc(_sc), sync_env(sc->env), sync_status(_status)
{}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int RGWReadDataSyncStatusCoroutine::operate()
+int RGWReadDataSyncStatusCoroutine::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
// read sync info
using ReadInfoCR = RGWSimpleRadosReadCR<rgw_data_sync_info>;
yield {
bool empty_on_enoent = false; // fail on ENOENT
- call(new ReadInfoCR(sync_env->async_rados, sync_env->svc->sysobj,
+ call(new ReadInfoCR(dpp, sync_env->async_rados, sync_env->svc->sysobj,
rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sc->source_zone)),
&sync_status->sync_info, empty_on_enoent));
}
if (retcode < 0) {
- ldout(sync_env->cct, 4) << "failed to read sync status info with "
+ ldpp_dout(dpp, 4) << "failed to read sync status info with "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
yield call(new ReadMarkersCR(sc, sync_status->sync_info.num_shards,
sync_status->sync_markers));
if (retcode < 0) {
- ldout(sync_env->cct, 4) << "failed to read sync status markers with "
+ ldpp_dout(dpp, 4) << "failed to read sync status markers with "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
}
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield {
char buf[16];
init_new_io(http_op);
- int ret = http_op->aio_read();
+ int ret = http_op->aio_read(dpp);
if (ret < 0) {
- ldout(sync_env->cct, 0) << "ERROR: failed to read from " << p << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to read from " << p << dendl;
log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl;
return set_cr_error(ret);
}
}
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield {
char buf[16];
if (sync_env->counters) {
timer.emplace(sync_env->counters, sync_counters::l_poll);
}
- int ret = http_op->aio_read();
+ int ret = http_op->aio_read(dpp);
if (ret < 0) {
- ldout(sync_env->cct, 0) << "ERROR: failed to read from " << p << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to read from " << p << dendl;
log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl;
if (sync_env->counters) {
sync_env->counters->inc(sync_counters::l_poll_err);
: RGWSimpleCoroutine(sc->cct), sc(sc), sync_env(sc->env), http_op(NULL),
shard_id(_shard_id), marker(_marker), max_entries(_max_entries), result(_result) {}
- int send_request() override {
+ int send_request(const DoutPrefixProvider *dpp) override {
RGWRESTConn *conn = sc->conn;
char buf[32];
http_op = new RGWRESTReadResource(conn, p, pairs, NULL, sync_env->http_manager);
init_new_io(http_op);
- int ret = http_op->aio_read();
+ int ret = http_op->aio_read(dpp);
if (ret < 0) {
- ldout(sync_env->cct, 0) << "ERROR: failed to read from " << p << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to read from " << p << dendl;
log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl;
http_op->put();
return ret;
int ret = http_op->wait(result, null_yield);
http_op->put();
if (ret < 0 && ret != -ENOENT) {
- ldout(sync_env->cct, 0) << "ERROR: failed to list remote datalog shard, ret=" << ret << dendl;
+ ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to list remote datalog shard, ret=" << ret << dendl;
return ret;
}
return 0;
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
int ret;
reenter(this) {
using LockCR = RGWSimpleRadosLockCR;
return set_cr_error(retcode);
}
using WriteInfoCR = RGWSimpleRadosWriteCR<rgw_data_sync_info>;
- yield call(new WriteInfoCR(sync_env->async_rados, sync_env->svc->sysobj,
+ yield call(new WriteInfoCR(dpp, sync_env->async_rados, sync_env->svc->sysobj,
rgw_raw_obj{pool, sync_status_oid},
status->sync_info));
if (retcode < 0) {
marker.timestamp = info.last_update;
const auto& oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, i);
using WriteMarkerCR = RGWSimpleRadosWriteCR<rgw_data_sync_marker>;
- spawn(new WriteMarkerCR(sync_env->async_rados, sync_env->svc->sysobj,
+ spawn(new WriteMarkerCR(dpp, sync_env->async_rados, sync_env->svc->sysobj,
rgw_raw_obj{pool, oid}, marker), true);
}
}
}
status->sync_info.state = rgw_data_sync_info::StateBuildingFullSyncMaps;
- yield call(new WriteInfoCR(sync_env->async_rados, sync_env->svc->sysobj,
+ yield call(new WriteInfoCR(dpp, sync_env->async_rados, sync_env->svc->sysobj,
rgw_raw_obj{pool, sync_status_oid},
status->sync_info));
if (retcode < 0) {
{
}
-int RGWRemoteDataLog::read_log_info(rgw_datalog_info *log_info)
+int RGWRemoteDataLog::read_log_info(const DoutPrefixProvider *dpp, rgw_datalog_info *log_info)
{
rgw_http_param_pair pairs[] = { { "type", "data" },
{ NULL, NULL } };
- int ret = sc.conn->get_json_resource("/admin/log", pairs, null_yield, *log_info);
+ int ret = sc.conn->get_json_resource(dpp, "/admin/log", pairs, null_yield, *log_info);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to fetch datalog info" << dendl;
return ret;
return 0;
}
-int RGWRemoteDataLog::read_source_log_shards_info(map<int, RGWDataChangesLogInfo> *shards_info)
+int RGWRemoteDataLog::read_source_log_shards_info(const DoutPrefixProvider *dpp, map<int, RGWDataChangesLogInfo> *shards_info)
{
rgw_datalog_info log_info;
- int ret = read_log_info(&log_info);
+ int ret = read_log_info(dpp, &log_info);
if (ret < 0) {
return ret;
}
- return run(new RGWReadRemoteDataLogInfoCR(&sc, log_info.num_shards, shards_info));
+ return run(dpp, new RGWReadRemoteDataLogInfoCR(&sc, log_info.num_shards, shards_info));
}
-int RGWRemoteDataLog::read_source_log_shards_next(map<int, string> shard_markers, map<int, rgw_datalog_shard_data> *result)
+int RGWRemoteDataLog::read_source_log_shards_next(const DoutPrefixProvider *dpp, map<int, string> shard_markers, map<int, rgw_datalog_shard_data> *result)
{
- return run(new RGWListRemoteDataLogCR(&sc, shard_markers, 1, result));
+ return run(dpp, new RGWListRemoteDataLogCR(&sc, shard_markers, 1, result));
}
int RGWRemoteDataLog::init(const rgw_zone_id& _source_zone, RGWRESTConn *_conn, RGWSyncErrorLogger *_error_logger,
stop();
}
-int RGWRemoteDataLog::read_sync_status(rgw_data_sync_status *sync_status)
+int RGWRemoteDataLog::read_sync_status(const DoutPrefixProvider *dpp, rgw_data_sync_status *sync_status)
{
// cannot run concurrently with run_sync(), so run in a separate manager
RGWCoroutinesManager crs(cct, cr_registry);
RGWDataSyncCtx sc_local = sc;
sc_local.env = &sync_env_local;
- ret = crs.run(new RGWReadDataSyncStatusCoroutine(&sc_local, sync_status));
+ ret = crs.run(dpp, new RGWReadDataSyncStatusCoroutine(&sc_local, sync_status));
http_manager.stop();
return ret;
}
-int RGWRemoteDataLog::read_recovering_shards(const int num_shards, set<int>& recovering_shards)
+int RGWRemoteDataLog::read_recovering_shards(const DoutPrefixProvider *dpp, const int num_shards, set<int>& recovering_shards)
{
// cannot run concurrently with run_sync(), so run in a separate manager
RGWCoroutinesManager crs(cct, cr_registry);
omapkeys.resize(num_shards);
uint64_t max_entries{1};
- ret = crs.run(new RGWReadDataSyncRecoveringShardsCR(&sc_local, max_entries, num_shards, omapkeys));
+ ret = crs.run(dpp, new RGWReadDataSyncRecoveringShardsCR(&sc_local, max_entries, num_shards, omapkeys));
http_manager.stop();
if (ret == 0) {
return ret;
}
-int RGWRemoteDataLog::init_sync_status(int num_shards)
+int RGWRemoteDataLog::init_sync_status(const DoutPrefixProvider *dpp, int num_shards)
{
rgw_data_sync_status sync_status;
sync_status.sync_info.num_shards = num_shards;
auto instance_id = ceph::util::generate_random_number<uint64_t>();
RGWDataSyncCtx sc_local = sc;
sc_local.env = &sync_env_local;
- ret = crs.run(new RGWInitDataSyncStatusCoroutine(&sc_local, num_shards, instance_id, tn, &sync_status));
+ ret = crs.run(dpp, new RGWInitDataSyncStatusCoroutine(&sc_local, num_shards, instance_id, tn, &sync_status));
http_manager.stop();
return ret;
}
delete entries_index;
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
entries_index = new RGWShardedOmapCRManager(sync_env->async_rados, store, this, num_shards,
sync_env->svc->zone->get_zone_params().log_pool,
entrypoint, pairs, &result));
}
if (retcode < 0) {
- ldout(sync_env->cct, 0) << "ERROR: failed to fetch metadata for section bucket.instance" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to fetch metadata for section bucket.instance" << dendl;
return set_cr_error(retcode);
}
for (iter = result.keys.begin(); iter != result.keys.end(); ++iter) {
- ldout(sync_env->cct, 20) << "list metadata: section=bucket.instance key=" << *iter << dendl;
+ ldpp_dout(dpp, 20) << "list metadata: section=bucket.instance key=" << *iter << dendl;
key = *iter;
yield {
int shard_id = (int)iter->first;
rgw_data_sync_marker& marker = iter->second;
marker.total_entries = entries_index->get_total_entries(shard_id);
- spawn(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, sync_env->svc->sysobj,
+ spawn(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(dpp, sync_env->async_rados, sync_env->svc->sysobj,
rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id)),
marker),
true);
}
} else {
- yield call(sync_env->error_logger->log_error_cr(sc->conn->get_remote_id(), "data.init", "",
+ yield call(sync_env->error_logger->log_error_cr(dpp, sc->conn->get_remote_id(), "data.init", "",
EIO, string("failed to build bucket instances map")));
}
while (collect(&ret, NULL)) {
if (ret < 0) {
- yield call(sync_env->error_logger->log_error_cr(sc->conn->get_remote_id(), "data.init", "",
+ yield call(sync_env->error_logger->log_error_cr(dpp, sc->conn->get_remote_id(), "data.init", "",
-ret, string("failed to store sync status: ") + cpp_strerror(-ret)));
req_ret = ret;
}
tn->log(20, SSTR("updating marker marker_oid=" << marker_oid << " marker=" << new_marker));
- return new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, sync_env->svc->sysobj,
+ return new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->dpp, sync_env->async_rados, sync_env->svc->sysobj,
rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, marker_oid),
sync_marker);
}
SSTR(bucket_shard_str{_sync_pair.dest_bs} << "<-" << bucket_shard_str{_sync_pair.source_bs} ))) {
}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
struct all_bucket_info {
~RGWRunBucketsSyncBySourceCR() override {
}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
class RGWRunBucketSourcesSyncCR : public RGWCoroutine {
const RGWSyncTraceNodeRef& _tn_parent,
ceph::real_time* progress);
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
void handle_complete_stack(uint64_t stack_id) {
auto iter = shard_progress.find(stack_id);
tn = sync_env->sync_tracer->add_node(_tn_parent, "entry", obligation.key);
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
if (state->obligation) {
// this is already syncing in another DataSyncSingleEntryCR
if (sync_status < 0) {
// write actual sync failures for 'radosgw-admin sync error list'
if (sync_status != -EBUSY && sync_status != -EAGAIN) {
- yield call(sync_env->error_logger->log_error_cr(sc->conn->get_remote_id(), "data", complete->key,
+ yield call(sync_env->error_logger->log_error_cr(dpp, sc->conn->get_remote_id(), "data", complete->key,
-sync_status, string("failed to sync bucket instance: ") + cpp_strerror(-sync_status)));
if (retcode < 0) {
tn->log(0, SSTR("ERROR: failed to log sync failure: retcode=" << retcode));
modified_shards.insert(keys.begin(), keys.end());
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
int r;
while (true) {
switch (sync_marker.state) {
sync_marker.state = rgw_data_sync_marker::IncrementalSync;
sync_marker.marker = sync_marker.next_step_marker;
sync_marker.next_step_marker.clear();
- call(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, sync_env->svc->sysobj,
+ call(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->dpp, sync_env->async_rados, sync_env->svc->sysobj,
rgw_raw_obj(pool, status_oid),
sync_marker));
}
}
RGWCoroutine *alloc_finisher_cr() override {
- return new RGWSimpleRadosReadCR<rgw_data_sync_marker>(sync_env->async_rados, sync_env->svc->sysobj,
+ return new RGWSimpleRadosReadCR<rgw_data_sync_marker>(sync_env->dpp, sync_env->async_rados, sync_env->svc->sysobj,
rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id)),
&sync_marker);
}
}
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
/* read sync status */
}
RGWCoroutine *set_sync_info_cr() {
- return new RGWSimpleRadosWriteCR<rgw_data_sync_info>(sync_env->async_rados, sync_env->svc->sysobj,
+ return new RGWSimpleRadosWriteCR<rgw_data_sync_info>(sync_env->dpp, sync_env->async_rados, sync_env->svc->sysobj,
rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sc->source_zone)),
sync_status.sync_info);
}
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
#define MAX_RACE_RETRIES_OBJ_FETCH 10
key, dest_key, versioned_epoch,
true,
std::static_pointer_cast<RGWFetchObjFilter>(filter),
- zones_trace, sync_env->counters, sync_env->dpp));
+ zones_trace, sync_env->counters, dpp));
}
if (retcode < 0) {
if (*need_retry) {
data_sync_cr->wakeup(shard_id, keys);
}
-int RGWRemoteDataLog::run_sync(int num_shards)
+int RGWRemoteDataLog::run_sync(const DoutPrefixProvider *dpp, int num_shards)
{
lock.lock();
data_sync_cr = new RGWDataSyncControlCR(&sc, num_shards, tn);
data_sync_cr->get(); // run() will drop a ref, so take another
lock.unlock();
- int r = run(data_sync_cr);
+ int r = run(dpp, data_sync_cr);
lock.lock();
data_sync_cr->put();
return store->ctx();
}
-int RGWDataSyncStatusManager::init()
+int RGWDataSyncStatusManager::init(const DoutPrefixProvider *dpp)
{
RGWZone *zone_def;
}
rgw_datalog_info datalog_info;
- r = source_log.read_log_info(&datalog_info);
+ r = source_log.read_log_info(dpp, &datalog_info);
if (r < 0) {
ldpp_dout(this, 5) << "ERROR: master.read_log_info() returned r=" << r << dendl;
finalize();
: RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env),
instance_key(bs.get_key()), info(_info) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield {
rgw_http_param_pair pairs[] = { { "type" , "bucket-index" },
status(_status), objv_tracker(objv_tracker)
{}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
/* fetch current position in logs */
yield call(new RGWReadRemoteBucketIndexLogInfoCR(sc, sync_pair.source_bs, &info));
if (write_status) {
map<string, bufferlist> attrs;
status.encode_all_attrs(attrs);
- call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, sync_env->svc->sysobj, obj, attrs, &objv_tracker));
+ call(new RGWSimpleRadosWriteAttrsCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, obj, attrs, &objv_tracker));
} else {
call(new RGWRadosRemoveCR(store, obj, &objv_tracker));
}
oid(RGWBucketPipeSyncStatusManager::status_oid(sc->source_zone, sync_pair)),
status(_status), objv_tracker(objv_tracker)
{}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int RGWReadBucketPipeSyncStatusCoroutine::operate()
+int RGWReadBucketPipeSyncStatusCoroutine::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
- yield call(new RGWSimpleRadosReadAttrsCR(sync_env->async_rados, sync_env->svc->sysobj,
+ yield call(new RGWSimpleRadosReadAttrsCR(dpp, sync_env->async_rados, sync_env->svc->sysobj,
rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, oid),
&attrs, true, objv_tracker));
if (retcode == -ENOENT) {
return set_cr_done();
}
if (retcode < 0) {
- ldout(sync_env->cct, 0) << "ERROR: failed to call fetch bucket shard info oid=" << oid << " ret=" << retcode << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to call fetch bucket shard info oid=" << oid << " ret=" << retcode << dendl;
return set_cr_error(retcode);
}
status->decode_from_attrs(sync_env->cct, attrs);
error_oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id) + ".retry";
}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int RGWReadRecoveringBucketShardsCoroutine::operate()
+int RGWReadRecoveringBucketShardsCoroutine::operate(const DoutPrefixProvider *dpp)
{
reenter(this){
//read recovering bucket shards
}
if (retcode < 0) {
- ldout(sync_env->cct, 0) << "failed to read recovering bucket shards with "
+ ldpp_dout(dpp, 0) << "failed to read recovering bucket shards with "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
status_oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id);
}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int RGWReadPendingBucketShardsCoroutine::operate()
+int RGWReadPendingBucketShardsCoroutine::operate(const DoutPrefixProvider *dpp)
{
reenter(this){
//read sync status marker
using CR = RGWSimpleRadosReadCR<rgw_data_sync_marker>;
- yield call(new CR(sync_env->async_rados, sync_env->svc->sysobj,
+ yield call(new CR(dpp, sync_env->async_rados, sync_env->svc->sysobj,
rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, status_oid),
sync_marker));
if (retcode < 0) {
- ldout(sync_env->cct,0) << "failed to read sync status marker with "
+ ldpp_dout(dpp, 0) << "failed to read sync status marker with "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
}
if (retcode < 0) {
- ldout(sync_env->cct,0) << "failed to read remote data log info with "
+ ldpp_dout(dpp, 0) << "failed to read remote data log info with "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
return 0;
}
-int RGWRemoteDataLog::read_shard_status(int shard_id, set<string>& pending_buckets, set<string>& recovering_buckets, rgw_data_sync_marker *sync_marker, const int max_entries)
+int RGWRemoteDataLog::read_shard_status(const DoutPrefixProvider *dpp, int shard_id, set<string>& pending_buckets, set<string>& recovering_buckets, rgw_data_sync_marker *sync_marker, const int max_entries)
{
// cannot run concurrently with run_sync(), so run in a separate manager
RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
RGWCoroutinesStack* pending_stack = new RGWCoroutinesStack(store->ctx(), &crs);
pending_stack->call(new RGWReadPendingBucketShardsCoroutine(&sc_local, shard_id, pending_buckets, sync_marker, max_entries));
stacks.push_back(pending_stack);
- ret = crs.run(stacks);
+ ret = crs.run(dpp, stacks);
http_manager.stop();
return ret;
}
instance_key(bs.get_key()), marker_position(_marker_position),
result(_result) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield {
rgw_http_param_pair pairs[] = { { "rgwx-bucket-instance", instance_key.c_str() },
: RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env),
instance_key(bs.get_key()), marker(_marker), result(_result) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
if (sync_env->counters) {
timer.emplace(sync_env->counters, sync_counters::l_poll);
sync_marker.encode_attr(attrs);
tn->log(20, SSTR("updating marker marker_oid=" << marker_oid << " marker=" << new_marker));
- return new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, sync_env->svc->sysobj,
+ return new RGWSimpleRadosWriteAttrsCR(sync_env->dpp, sync_env->async_rados, sync_env->svc->sysobj,
rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, marker_oid),
attrs, &objv_tracker);
}
sync_marker(sync_marker), stable_timestamp(stable_timestamp),
objv_tracker(objv_tracker)
{}
- int operate() {
+ int operate(const DoutPrefixProvider *dpp) {
reenter(this) {
sync_marker.encode_attr(attrs);
- yield call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, sync_env->svc->sysobj,
+ yield call(new RGWSimpleRadosWriteAttrsCR(sync_env->dpp, sync_env->async_rados, sync_env->svc->sysobj,
obj, attrs, &objv_tracker));
if (retcode < 0) {
return set_cr_error(retcode);
zones_trace.insert(sync_env->svc->zone->get_zone().id, _sync_pipe.info.dest_bs.get_key());
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
/* skip entries that are not complete */
if (op_state != CLS_RGW_STATE_COMPLETE) {
}
}
if (!error_ss.str().empty()) {
- yield call(sync_env->error_logger->log_error_cr(sc->conn->get_remote_id(), "data", error_ss.str(), -retcode, string("failed to sync object") + cpp_strerror(-sync_status)));
+ yield call(sync_env->error_logger->log_error_cr(dpp, sc->conn->get_remote_id(), "data", error_ss.str(), -retcode, string("failed to sync object") + cpp_strerror(-sync_status)));
}
done:
if (sync_status == 0) {
prefix_handler.set_rules(sync_pipe.get_rules());
}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int RGWBucketShardFullSyncCR::operate()
+int RGWBucketShardFullSyncCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
list_marker = sync_info.full_marker.position;
sync_info.state = rgw_bucket_shard_sync_info::StateIncrementalSync;
map<string, bufferlist> attrs;
sync_info.encode_state_attr(attrs);
- call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, sync_env->svc->sysobj,
+ call(new RGWSimpleRadosWriteAttrsCR(dpp, sync_env->async_rados, sync_env->svc->sysobj,
rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, status_oid),
attrs));
}
return boost::starts_with(key.name, iter->first);
}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int RGWBucketShardIncrementalSyncCR::operate()
+int RGWBucketShardIncrementalSyncCR::operate(const DoutPrefixProvider *dpp)
{
int ret;
reenter(this) {
for (; entries_iter != entries_end; ++entries_iter) {
auto e = *entries_iter;
if (e.op == RGWModifyOp::CLS_RGW_OP_SYNCSTOP) {
- ldout(sync_env->cct, 20) << "syncstop on " << e.timestamp << dendl;
+ ldpp_dout(dpp, 20) << "syncstop on " << e.timestamp << dendl;
syncstopped = true;
entries_end = std::next(entries_iter); // stop after this entry
break;
sync_info.inc_marker.position = cur_id;
if (entry->op == RGWModifyOp::CLS_RGW_OP_SYNCSTOP || entry->op == RGWModifyOp::CLS_RGW_OP_RESYNC) {
- ldout(sync_env->cct, 20) << "detected syncstop or resync on " << entries_iter->timestamp << ", skipping entry" << dendl;
+ ldpp_dout(dpp, 20) << "detected syncstop or resync on " << entries_iter->timestamp << ", skipping entry" << dendl;
marker_tracker.try_update_high_marker(cur_id, 0, entry->timestamp);
continue;
}
const rgw_bucket& _source_bucket) : sync_env(_sync_env),
source_bucket(_source_bucket) {}
int operate() override {
- int r = sync_env->svc->bucket_sync->get_bucket_sync_hints(source_bucket,
+ int r = sync_env->svc->bucket_sync->get_bucket_sync_hints(sync_env->dpp,
+ source_bucket,
nullptr,
&targets,
null_yield);
if (r < 0) {
- ldout(sync_env->cct, 0) << "ERROR: " << __func__ << "(): failed to fetch bucket sync hints for bucket=" << source_bucket << dendl;
+ ldpp_dout(sync_env->dpp, 0) << "ERROR: " << __func__ << "(): failed to fetch bucket sync hints for bucket=" << source_bucket << dendl;
return r;
}
<< ":source_zone=" << source_zone.value_or(rgw_zone_id("*")).id))) {
}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
std::ostream& operator<<(std::ostream& out, std::optional<rgw_bucket_shard>& bs) {
}
}
-int RGWRunBucketSourcesSyncCR::operate()
+int RGWRunBucketSourcesSyncCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
yield call(new RGWGetBucketPeersCR(sync_env, target_bucket, sc->source_zone, source_bucket, &pipes, tn));
return set_cr_error(retcode);
}
- ldpp_dout(sync_env->dpp, 20) << __func__ << "(): requested source_bs=" << source_bs << " target_bs=" << target_bs << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): requested source_bs=" << source_bs << " target_bs=" << target_bs << dendl;
if (pipes.empty()) {
- ldpp_dout(sync_env->dpp, 20) << __func__ << "(): no relevant sync pipes found" << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): no relevant sync pipes found" << dendl;
return set_cr_done();
}
for (siter = pipes.begin(); siter != pipes.end(); ++siter) {
{
- ldpp_dout(sync_env->dpp, 20) << __func__ << "(): sync pipe=" << *siter << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): sync pipe=" << *siter << dendl;
source_num_shards = siter->source.get_bucket_info().layout.current_index.layout.normal.num_shards;
target_num_shards = siter->target.get_bucket_info().layout.current_index.layout.normal.num_shards;
}
}
- ldpp_dout(sync_env->dpp, 20) << __func__ << "(): num shards=" << num_shards << " cur_shard=" << cur_shard << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): num shards=" << num_shards << " cur_shard=" << cur_shard << dendl;
for (; num_shards > 0; --num_shards, ++cur_shard) {
/*
sync_pair.dest_bs.shard_id = -1;
}
- ldpp_dout(sync_env->dpp, 20) << __func__ << "(): sync_pair=" << sync_pair << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): sync_pair=" << sync_pair << dendl;
cur_progress = (progress ? &shard_progress[prealloc_stack_id()] : nullptr);
SSTR(bucket))) {
}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int RGWSyncGetBucketInfoCR::operate()
+int RGWSyncGetBucketInfoCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
- yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->store, bucket, pbucket_info, pattrs, sync_env->dpp));
+ yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->store, bucket, pbucket_info, pattrs, dpp));
if (retcode == -ENOENT) {
/* bucket instance info has not been synced in yet, fetch it now */
yield {
tn->log(10, SSTR("no local info for bucket:" << ": fetching metadata"));
string raw_key = string("bucket.instance:") + bucket.get_key();
- meta_sync_env.init(sync_env->dpp, cct, sync_env->store, sync_env->svc->zone->get_master_conn(), sync_env->async_rados,
+ meta_sync_env.init(dpp, cct, sync_env->store, sync_env->svc->zone->get_master_conn(), sync_env->async_rados,
sync_env->http_manager, sync_env->error_logger, sync_env->sync_tracer);
call(new RGWMetaSyncSingleEntryCR(&meta_sync_env, raw_key,
return set_cr_error(retcode);
}
- yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->store, bucket, pbucket_info, pattrs, sync_env->dpp));
+ yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->store, bucket, pbucket_info, pattrs, dpp));
}
if (retcode < 0) {
tn->log(0, SSTR("ERROR: failed to retrieve bucket info for bucket=" << bucket_str{bucket}));
get_policy_params.bucket = bucket;
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
for (i = 0; i < 2; ++i) {
yield call(new RGWBucketGetSyncPolicyHandlerCR(sync_env->async_rados,
sync_env->store,
get_policy_params,
policy,
- sync_env->dpp));
+ dpp));
if (retcode < 0 &&
retcode != -ENOENT) {
return set_cr_error(retcode);
};
-int RGWGetBucketPeersCR::operate()
+int RGWGetBucketPeersCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
if (pipes) {
for (hiter = get_hint_targets_action->targets.begin();
hiter != get_hint_targets_action->targets.end();
++hiter) {
- ldpp_dout(sync_env->dpp, 20) << "Got sync hint for bucket=" << *source_bucket << ": " << hiter->get_key() << dendl;
+ ldpp_dout(dpp, 20) << "Got sync hint for bucket=" << *source_bucket << ": " << hiter->get_key() << dendl;
target_policy = make_shared<rgw_bucket_get_sync_policy_result>();
yield call(new RGWSyncGetBucketSyncPolicyHandlerCR(sync_env,
return 0;
}
-int RGWRunBucketsSyncBySourceCR::operate()
+int RGWRunBucketsSyncBySourceCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
return set_cr_done();
return 0;
}
-int RGWRunBucketSyncCoroutine::operate()
+int RGWRunBucketSyncCoroutine::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
yield call(new RGWReadBucketPipeSyncStatusCoroutine(sc, sync_pair, &sync_status, &objv_tracker));
return new RGWRunBucketSyncCoroutine(&sc, nullptr, sync_pairs[num], sync_env->sync_tracer->root_node, nullptr);
}
-int RGWBucketPipeSyncStatusManager::init()
+int RGWBucketPipeSyncStatusManager::init(const DoutPrefixProvider *dpp)
{
int ret = http_manager.start();
if (ret < 0) {
rgw_sync_pipe_info_set pipes;
- ret = cr_mgr.run(new RGWGetBucketPeersCR(&sync_env,
+ ret = cr_mgr.run(dpp, new RGWGetBucketPeersCR(&sync_env,
dest_bucket,
source_zone,
source_bucket,
return 0;
}
-int RGWBucketPipeSyncStatusManager::init_sync_status()
+int RGWBucketPipeSyncStatusManager::init_sync_status(const DoutPrefixProvider *dpp)
{
list<RGWCoroutinesStack *> stacks;
// pass an empty objv tracker to each so that the version gets incremented
stacks.push_back(stack);
}
- return cr_mgr.run(stacks);
+ return cr_mgr.run(dpp, stacks);
}
-int RGWBucketPipeSyncStatusManager::read_sync_status()
+int RGWBucketPipeSyncStatusManager::read_sync_status(const DoutPrefixProvider *dpp)
{
list<RGWCoroutinesStack *> stacks;
stacks.push_back(stack);
}
- int ret = cr_mgr.run(stacks);
+ int ret = cr_mgr.run(dpp, stacks);
if (ret < 0) {
ldpp_dout(this, 0) << "ERROR: failed to read sync status for "
<< bucket_str{dest_bucket} << dendl;
return 0;
}
-int RGWBucketPipeSyncStatusManager::run()
+int RGWBucketPipeSyncStatusManager::run(const DoutPrefixProvider *dpp)
{
list<RGWCoroutinesStack *> stacks;
stacks.push_back(stack);
}
- int ret = cr_mgr.run(stacks);
+ int ret = cr_mgr.run(dpp, stacks);
if (ret < 0) {
ldpp_dout(this, 0) << "ERROR: failed to read sync status for "
<< bucket_str{dest_bucket} << dendl;
return prefix + ":" + obj->get_name() + ":" + obj->get_instance();
}
-int rgw_read_remote_bilog_info(RGWRESTConn* conn,
+int rgw_read_remote_bilog_info(const DoutPrefixProvider *dpp,
+ RGWRESTConn* conn,
const rgw_bucket& bucket,
BucketIndexShardsManager& markers,
optional_yield y)
{ nullptr, nullptr }
};
rgw_bucket_index_marker_info result;
- int r = conn->get_json_resource("/admin/log/", params, y, result);
+ int r = conn->get_json_resource(dpp, "/admin/log/", params, y, result);
if (r < 0) {
- lderr(conn->get_ctx()) << "failed to fetch remote log markers: " << cpp_strerror(r) << dendl;
+ ldpp_dout(dpp, -1) << "failed to fetch remote log markers: " << cpp_strerror(r) << dendl;
return r;
}
r = markers.from_string(result.max_marker, -1);
sc.init(&env, nullptr, *pipe.source.zone);
RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
- return crs.run(new RGWCollectBucketSyncStatusCR(store, &sc,
+ return crs.run(dpp, new RGWCollectBucketSyncStatusCR(store, &sc,
*psource_bucket_info,
dest_bucket_info,
status));
PerfCounters* _counters);
void finish();
- int read_log_info(rgw_datalog_info *log_info);
- int read_source_log_shards_info(map<int, RGWDataChangesLogInfo> *shards_info);
- int read_source_log_shards_next(map<int, string> shard_markers, map<int, rgw_datalog_shard_data> *result);
- int read_sync_status(rgw_data_sync_status *sync_status);
- int read_recovering_shards(const int num_shards, set<int>& recovering_shards);
- int read_shard_status(int shard_id, set<string>& lagging_buckets,set<string>& recovering_buckets, rgw_data_sync_marker* sync_marker, const int max_entries);
- int init_sync_status(int num_shards);
- int run_sync(int num_shards);
+ int read_log_info(const DoutPrefixProvider *dpp, rgw_datalog_info *log_info);
+ int read_source_log_shards_info(const DoutPrefixProvider *dpp, map<int, RGWDataChangesLogInfo> *shards_info);
+ int read_source_log_shards_next(const DoutPrefixProvider *dpp, map<int, string> shard_markers, map<int, rgw_datalog_shard_data> *result);
+ int read_sync_status(const DoutPrefixProvider *dpp, rgw_data_sync_status *sync_status);
+ int read_recovering_shards(const DoutPrefixProvider *dpp, const int num_shards, set<int>& recovering_shards);
+ int read_shard_status(const DoutPrefixProvider *dpp, int shard_id, set<string>& lagging_buckets,set<string>& recovering_buckets, rgw_data_sync_marker* sync_marker, const int max_entries);
+ int init_sync_status(const DoutPrefixProvider *dpp, int num_shards);
+ int run_sync(const DoutPrefixProvider *dpp, int num_shards);
void wakeup(int shard_id, set<string>& keys);
};
~RGWDataSyncStatusManager() {
finalize();
}
- int init();
+ int init(const DoutPrefixProvider *dpp);
void finalize();
static string shard_obj_name(const rgw_zone_id& source_zone, int shard_id);
static string sync_status_oid(const rgw_zone_id& source_zone);
- int read_sync_status(rgw_data_sync_status *sync_status) {
- return source_log.read_sync_status(sync_status);
+ int read_sync_status(const DoutPrefixProvider *dpp, rgw_data_sync_status *sync_status) {
+ return source_log.read_sync_status(dpp, sync_status);
}
- int read_recovering_shards(const int num_shards, set<int>& recovering_shards) {
- return source_log.read_recovering_shards(num_shards, recovering_shards);
+ int read_recovering_shards(const DoutPrefixProvider *dpp, const int num_shards, set<int>& recovering_shards) {
+ return source_log.read_recovering_shards(dpp, num_shards, recovering_shards);
}
- int read_shard_status(int shard_id, set<string>& lagging_buckets, set<string>& recovering_buckets, rgw_data_sync_marker *sync_marker, const int max_entries) {
- return source_log.read_shard_status(shard_id, lagging_buckets, recovering_buckets,sync_marker, max_entries);
+ int read_shard_status(const DoutPrefixProvider *dpp, int shard_id, set<string>& lagging_buckets, set<string>& recovering_buckets, rgw_data_sync_marker *sync_marker, const int max_entries) {
+ return source_log.read_shard_status(dpp, shard_id, lagging_buckets, recovering_buckets,sync_marker, max_entries);
}
- int init_sync_status() { return source_log.init_sync_status(num_shards); }
+ int init_sync_status(const DoutPrefixProvider *dpp) { return source_log.init_sync_status(dpp, num_shards); }
- int read_log_info(rgw_datalog_info *log_info) {
- return source_log.read_log_info(log_info);
+ int read_log_info(const DoutPrefixProvider *dpp, rgw_datalog_info *log_info) {
+ return source_log.read_log_info(dpp, log_info);
}
- int read_source_log_shards_info(map<int, RGWDataChangesLogInfo> *shards_info) {
- return source_log.read_source_log_shards_info(shards_info);
+ int read_source_log_shards_info(const DoutPrefixProvider *dpp, map<int, RGWDataChangesLogInfo> *shards_info) {
+ return source_log.read_source_log_shards_info(dpp, shards_info);
}
- int read_source_log_shards_next(map<int, string> shard_markers, map<int, rgw_datalog_shard_data> *result) {
- return source_log.read_source_log_shards_next(shard_markers, result);
+ int read_source_log_shards_next(const DoutPrefixProvider *dpp, map<int, string> shard_markers, map<int, rgw_datalog_shard_data> *result) {
+ return source_log.read_source_log_shards_next(dpp, shard_markers, result);
}
- int run() { return source_log.run_sync(num_shards); }
+ int run(const DoutPrefixProvider *dpp) { return source_log.run_sync(dpp, num_shards); }
void wakeup(int shard_id, set<string>& keys) { return source_log.wakeup(shard_id, keys); }
void stop() {
class BucketIndexShardsManager;
-int rgw_read_remote_bilog_info(RGWRESTConn* conn,
+int rgw_read_remote_bilog_info(const DoutPrefixProvider *dpp,
+ RGWRESTConn* conn,
const rgw_bucket& bucket,
BucketIndexShardsManager& markers,
optional_yield y);
const rgw_bucket& dest_bucket);
~RGWBucketPipeSyncStatusManager();
- int init();
+ int init(const DoutPrefixProvider *dpp);
map<int, rgw_bucket_shard_sync_info>& get_sync_status() { return sync_status; }
- int init_sync_status();
+ int init_sync_status(const DoutPrefixProvider *dpp);
static string status_oid(const rgw_zone_id& source_zone, const rgw_bucket_sync_pair_info& bs);
static string obj_status_oid(const rgw_bucket_sync_pipe& sync_pipe,
unsigned get_subsys() const override;
std::ostream& gen_prefix(std::ostream& out) const override;
- int read_sync_status();
- int run();
+ int read_sync_status(const DoutPrefixProvider *dpp);
+ int run(const DoutPrefixProvider *dpp);
};
/// read the sync status of all bucket shards from the given source zone
}
}
~RGWDataChangesOmap() override = default;
+
void prepare(ceph::real_time ut, const std::string& key,
ceph::buffer::list&& entry, entries& out) override {
if (!std::holds_alternative<centries>(out)) {
cls_log_add_prepare_entry(e, utime_t(ut), {}, key, entry);
std::get<centries>(out).push_back(std::move(e));
}
- int push(int index, entries&& items) override {
+ int push(const DoutPrefixProvider *dpp, int index, entries&& items) override {
lr::ObjectWriteOperation op;
cls_log_add(op, std::get<centries>(items), true);
- auto r = rgw_rados_operate(ioctx, oids[index], &op, null_yield);
+ auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, null_yield);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__
<< ": failed to push to " << oids[index] << cpp_strerror(-r)
<< dendl;
}
return r;
}
- int push(int index, ceph::real_time now,
+ int push(const DoutPrefixProvider *dpp, int index, ceph::real_time now,
const std::string& key,
ceph::buffer::list&& bl) override {
lr::ObjectWriteOperation op;
cls_log_add(op, utime_t(now), {}, key, bl);
- auto r = rgw_rados_operate(ioctx, oids[index], &op, null_yield);
+ auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, null_yield);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__
<< ": failed to push to " << oids[index]
<< cpp_strerror(-r) << dendl;
}
return r;
}
- int list(int index, int max_entries,
+ int list(const DoutPrefixProvider *dpp, int index, int max_entries,
std::vector<rgw_data_change_log_entry>& entries,
std::optional<std::string_view> marker,
std::string* out_marker, bool* truncated) override {
lr::ObjectReadOperation op;
cls_log_list(op, {}, {}, std::string(marker.value_or("")),
max_entries, log_entries, out_marker, truncated);
- auto r = rgw_rados_operate(ioctx, oids[index], &op, nullptr, null_yield);
+ auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, nullptr, null_yield);
if (r == -ENOENT) {
*truncated = false;
return 0;
}
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__
<< ": failed to list " << oids[index]
<< cpp_strerror(-r) << dendl;
return r;
try {
decode(log_entry.entry, liter);
} catch (ceph::buffer::error& err) {
- lderr(cct) << __PRETTY_FUNCTION__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__
<< ": failed to decode data changes log entry: "
<< err.what() << dendl;
return -EIO;
}
return 0;
}
- int get_info(int index, RGWDataChangesLogInfo *info) override {
+ int get_info(const DoutPrefixProvider *dpp, int index, RGWDataChangesLogInfo *info) override {
cls_log_header header;
lr::ObjectReadOperation op;
cls_log_info(op, &header);
- auto r = rgw_rados_operate(ioctx, oids[index], &op, nullptr, null_yield);
+ auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, nullptr, null_yield);
if (r == -ENOENT) r = 0;
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__
<< ": failed to get info from " << oids[index]
<< cpp_strerror(-r) << dendl;
} else {
}
return r;
}
- int trim(int index, std::string_view marker) override {
+ int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker) override {
lr::ObjectWriteOperation op;
cls_log_trim(op, {}, {}, {}, std::string(marker));
- auto r = rgw_rados_operate(ioctx, oids[index], &op, null_yield);
+ auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, null_yield);
if (r == -ENOENT) r = -ENODATA;
if (r < 0 && r != -ENODATA) {
- lderr(cct) << __PRETTY_FUNCTION__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__
<< ": failed to get info from " << oids[index]
<< cpp_strerror(-r) << dendl;
}
return r;
}
- int trim(int index, std::string_view marker,
+ int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker,
lr::AioCompletion* c) override {
lr::ObjectWriteOperation op;
cls_log_trim(op, {}, {}, {}, std::string(marker));
auto r = ioctx.aio_operate(oids[index], c, &op, 0);
if (r == -ENOENT) r = -ENODATA;
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__
<< ": failed to get info from " << oids[index]
<< cpp_strerror(-r) << dendl;
}
std::string_view max_marker() const override {
return "99999999"sv;
}
- int is_empty() override {
+ int is_empty(const DoutPrefixProvider *dpp) override {
for (auto shard = 0u; shard < oids.size(); ++shard) {
std::list<cls_log_entry> log_entries;
lr::ObjectReadOperation op;
std::string out_marker;
bool truncated;
cls_log_list(op, {}, {}, {}, 1, log_entries, &out_marker, &truncated);
- auto r = rgw_rados_operate(ioctx, oids[shard], &op, nullptr, null_yield);
+ auto r = rgw_rados_operate(dpp, ioctx, oids[shard], &op, nullptr, null_yield);
if (r == -ENOENT) {
continue;
}
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__
<< ": failed to list " << oids[shard]
<< cpp_strerror(-r) << dendl;
return r;
}
std::get<centries>(out).push_back(std::move(entry));
}
- int push(int index, entries&& items) override {
- auto r = fifos[index].push(std::get<centries>(items), null_yield);
+ int push(const DoutPrefixProvider *dpp, int index, entries&& items) override {
+ auto r = fifos[index].push(dpp, std::get<centries>(items), null_yield);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__
<< ": unable to push to FIFO: " << get_oid(index)
<< ": " << cpp_strerror(-r) << dendl;
}
return r;
}
- int push(int index, ceph::real_time,
+ int push(const DoutPrefixProvider *dpp, int index, ceph::real_time,
const std::string&,
ceph::buffer::list&& bl) override {
- auto r = fifos[index].push(std::move(bl), null_yield);
+ auto r = fifos[index].push(dpp, std::move(bl), null_yield);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__
<< ": unable to push to FIFO: " << get_oid(index)
<< ": " << cpp_strerror(-r) << dendl;
}
return r;
}
- int list(int index, int max_entries,
+ int list(const DoutPrefixProvider *dpp, int index, int max_entries,
std::vector<rgw_data_change_log_entry>& entries,
std::optional<std::string_view> marker,
std::string* out_marker, bool* truncated) override {
std::vector<rgw::cls::fifo::list_entry> log_entries;
bool more = false;
- auto r = fifos[index].list(max_entries, marker, &log_entries, &more,
+ auto r = fifos[index].list(dpp, max_entries, marker, &log_entries, &more,
null_yield);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__
<< ": unable to list FIFO: " << get_oid(index)
<< ": " << cpp_strerror(-r) << dendl;
return r;
try {
decode(log_entry.entry, liter);
} catch (const buffer::error& err) {
- lderr(cct) << __PRETTY_FUNCTION__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__
<< ": failed to decode data changes log entry: "
<< err.what() << dendl;
return -EIO;
}
return 0;
}
- int get_info(int index, RGWDataChangesLogInfo *info) override {
+ int get_info(const DoutPrefixProvider *dpp, int index, RGWDataChangesLogInfo *info) override {
auto& fifo = fifos[index];
- auto r = fifo.read_meta(null_yield);
+ auto r = fifo.read_meta(dpp, null_yield);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__
<< ": unable to get FIFO metadata: " << get_oid(index)
<< ": " << cpp_strerror(-r) << dendl;
return r;
}
rados::cls::fifo::info m;
- fifo.meta(m, null_yield);
+ fifo.meta(dpp, m, null_yield);
auto p = m.head_part_num;
if (p < 0) {
info->marker = ""s;
return 0;
}
rgw::cls::fifo::part_info h;
- r = fifo.get_part_info(p, &h, null_yield);
+ r = fifo.get_part_info(dpp, p, &h, null_yield);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__
<< ": unable to get part info: " << get_oid(index) << "/" << p
<< ": " << cpp_strerror(-r) << dendl;
return r;
info->last_update = h.max_time;
return 0;
}
- int trim(int index, std::string_view marker) override {
- auto r = fifos[index].trim(marker, false, null_yield);
+ int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker) override {
+ auto r = fifos[index].trim(dpp, marker, false, null_yield);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__
<< ": unable to trim FIFO: " << get_oid(index)
<< ": " << cpp_strerror(-r) << dendl;
}
return r;
}
- int trim(int index, std::string_view marker,
+ int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker,
librados::AioCompletion* c) override {
int r = 0;
if (marker == rgw::cls::fifo::marker(0, 0).to_string()) {
rgw_complete_aio_completion(c, -ENODATA);
} else {
- fifos[index].trim(marker, false, c, null_yield);
+ fifos[index].trim(dpp, marker, false, c, null_yield);
}
return r;
}
rgw::cls::fifo::marker::max().to_string();
return std::string_view(mm);
}
- int is_empty() override {
+ int is_empty(const DoutPrefixProvider *dpp) override {
std::vector<rgw::cls::fifo::list_entry> log_entries;
bool more = false;
for (auto shard = 0u; shard < fifos.size(); ++shard) {
- auto r = fifos[shard].list(1, {}, &log_entries, &more,
+ auto r = fifos[shard].list(dpp, 1, {}, &log_entries, &more,
null_yield);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__
<< ": unable to list FIFO: " << get_oid(shard)
<< ": " << cpp_strerror(-r) << dendl;
return r;
}
-int RGWDataChangesLog::start(const RGWZone* _zone,
+int RGWDataChangesLog::start(const DoutPrefixProvider *dpp, const RGWZone* _zone,
const RGWZoneParams& zoneparams,
librados::Rados* lr)
{
// Should be guaranteed by `set_enum_allowed`
ceph_assert(defbacking);
auto log_pool = zoneparams.log_pool;
- auto r = rgw_init_ioctx(lr, log_pool, ioctx, true, false);
+ auto r = rgw_init_ioctx(dpp, lr, log_pool, ioctx, true, false);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__
<< ": Failed to initialized ioctx, r=" << r
<< ", pool=" << log_pool << dendl;
return -r;
}
auto besr = logback_generations::init<DataLogBackends>(
- ioctx, metadata_log_oid(), [this](uint64_t gen_id, int shard) {
+ dpp, ioctx, metadata_log_oid(), [this](uint64_t gen_id, int shard) {
return get_oid(gen_id, shard);
},
num_shards, *defbacking, null_yield, *this);
return static_cast<int>(r);
}
-int RGWDataChangesLog::renew_entries()
+int RGWDataChangesLog::renew_entries(const DoutPrefixProvider *dpp)
{
if (!zone->log_data)
return 0;
auto now = real_clock::now();
- auto ret = be->push(index, std::move(entries));
+ auto ret = be->push(dpp, index, std::move(entries));
if (ret < 0) {
/* we don't really need to have a special handling for failed cases here,
* as this is just an optimization. */
- lderr(cct) << "ERROR: svc.cls->timelog.add() returned " << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: svc.cls->timelog.add() returned " << ret << dendl;
return ret;
}
ldpp_dout(dpp, 20) << "RGWDataChangesLog::add_entry() sending update with now=" << now << " cur_expiration=" << expiration << dendl;
auto be = bes->head();
- ret = be->push(index, now, change.key, std::move(bl));
+ ret = be->push(dpp, index, now, change.key, std::move(bl));
now = real_clock::now();
return ret;
}
-int DataLogBackends::list(int shard, int max_entries,
+int DataLogBackends::list(const DoutPrefixProvider *dpp, int shard, int max_entries,
std::vector<rgw_data_change_log_entry>& entries,
std::optional<std::string_view> marker,
std::string* out_marker, bool* truncated)
auto be = i->second;
l.unlock();
gen_id = be->gen_id;
- auto r = be->list(shard, max_entries, gentries,
+ auto r = be->list(dpp, shard, max_entries, gentries,
gen_id == start_id ? start_cursor : std::string{},
&out_cursor, truncated);
if (r < 0)
return 0;
}
-int RGWDataChangesLog::list_entries(int shard, int max_entries,
+int RGWDataChangesLog::list_entries(const DoutPrefixProvider *dpp, int shard, int max_entries,
std::vector<rgw_data_change_log_entry>& entries,
std::optional<std::string_view> marker,
std::string* out_marker, bool* truncated)
{
assert(shard < num_shards);
- return bes->list(shard, max_entries, entries, marker, out_marker, truncated);
+ return bes->list(dpp, shard, max_entries, entries, marker, out_marker, truncated);
}
-int RGWDataChangesLog::list_entries(int max_entries,
+int RGWDataChangesLog::list_entries(const DoutPrefixProvider *dpp, int max_entries,
std::vector<rgw_data_change_log_entry>& entries,
LogMarker& marker, bool *ptruncated)
{
entries.clear();
for (; marker.shard < num_shards && int(entries.size()) < max_entries;
marker.shard++, marker.marker.reset()) {
- int ret = list_entries(marker.shard, max_entries - entries.size(),
+ int ret = list_entries(dpp, marker.shard, max_entries - entries.size(),
entries, marker.marker, NULL, &truncated);
if (ret == -ENOENT) {
continue;
return 0;
}
-int RGWDataChangesLog::get_info(int shard_id, RGWDataChangesLogInfo *info)
+int RGWDataChangesLog::get_info(const DoutPrefixProvider *dpp, int shard_id, RGWDataChangesLogInfo *info)
{
assert(shard_id < num_shards);
auto be = bes->head();
- auto r = be->get_info(shard_id, info);
+ auto r = be->get_info(dpp, shard_id, info);
if (!info->marker.empty()) {
info->marker = gencursor(be->gen_id, info->marker);
}
return r;
}
-int DataLogBackends::trim_entries(int shard_id, std::string_view marker)
+int DataLogBackends::trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker)
{
auto [target_gen, cursor] = cursorgen(marker);
std::unique_lock l(m);
be = upper_bound(be->gen_id)->second) {
l.unlock();
auto c = be->gen_id == target_gen ? cursor : be->max_marker();
- r = be->trim(shard_id, c);
+ r = be->trim(dpp, shard_id, c);
if (r == -ENOENT)
r = -ENODATA;
if (r == -ENODATA && be->gen_id < target_gen)
return r;
}
-int RGWDataChangesLog::trim_entries(int shard_id, std::string_view marker)
+int RGWDataChangesLog::trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker)
{
assert(shard_id < num_shards);
- return bes->trim_entries(shard_id, marker);
+ return bes->trim_entries(dpp, shard_id, marker);
}
class GenTrim : public rgw::cls::fifo::Completion<GenTrim> {
const uint64_t tail_gen;
boost::intrusive_ptr<RGWDataChangesBE> be;
- GenTrim(DataLogBackends* bes, int shard_id, uint64_t target_gen,
+ GenTrim(const DoutPrefixProvider *dpp, DataLogBackends* bes, int shard_id, uint64_t target_gen,
std::string cursor, uint64_t head_gen, uint64_t tail_gen,
boost::intrusive_ptr<RGWDataChangesBE> be,
lr::AioCompletion* super)
- : Completion(super), bes(bes), shard_id(shard_id), target_gen(target_gen),
+ : Completion(dpp, super), bes(bes), shard_id(shard_id), target_gen(target_gen),
cursor(std::move(cursor)), head_gen(head_gen), tail_gen(tail_gen),
be(std::move(be)) {}
- void handle(Ptr&& p, int r) {
+ void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) {
auto gen_id = be->gen_id;
be.reset();
if (r == -ENOENT)
be = i->second;
}
auto c = be->gen_id == target_gen ? cursor : be->max_marker();
- be->trim(shard_id, c, call(std::move(p)));
+ be->trim(dpp, shard_id, c, call(std::move(p)));
}
};
-void DataLogBackends::trim_entries(int shard_id, std::string_view marker,
+void DataLogBackends::trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker,
librados::AioCompletion* c)
{
auto [target_gen, cursor] = cursorgen(marker);
}
auto be = begin()->second;
l.unlock();
- auto gt = std::make_unique<GenTrim>(this, shard_id, target_gen,
+ auto gt = std::make_unique<GenTrim>(dpp, this, shard_id, target_gen,
std::string(cursor), head_gen, tail_gen,
be, c);
auto cc = be->gen_id == target_gen ? cursor : be->max_marker();
- be->trim(shard_id, cc, GenTrim::call(std::move(gt)));
+ be->trim(dpp, shard_id, cc, GenTrim::call(std::move(gt)));
}
-int DataLogBackends::trim_generations(std::optional<uint64_t>& through) {
+int DataLogBackends::trim_generations(const DoutPrefixProvider *dpp, std::optional<uint64_t>& through) {
if (size() != 1) {
std::vector<mapped_type> candidates;
{
std::optional<uint64_t> highest;
for (auto& be : candidates) {
- auto r = be->is_empty();
+ auto r = be->is_empty(dpp);
if (r < 0) {
return r;
} else if (r == 1) {
if (!highest) {
return 0;
}
- auto ec = empty_to(*highest, null_yield);
+ auto ec = empty_to(dpp, *highest, null_yield);
if (ec) {
return ceph::from_error_code(ec);
}
}
- return ceph::from_error_code(remove_empty(null_yield));
+ return ceph::from_error_code(remove_empty(dpp, null_yield));
}
-int RGWDataChangesLog::trim_entries(int shard_id, std::string_view marker,
+int RGWDataChangesLog::trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker,
librados::AioCompletion* c)
{
assert(shard_id < num_shards);
- bes->trim_entries(shard_id, marker, c);
+ bes->trim_entries(dpp, shard_id, marker, c);
return 0;
}
static constexpr auto runs_per_prune = 150;
auto run = 0;
for (;;) {
- dout(2) << "RGWDataChangesLog::ChangesRenewThread: start" << dendl;
- int r = renew_entries();
+ const DoutPrefix dp(cct, dout_subsys, "rgw data changes log: ");
+ ldpp_dout(&dp, 2) << "RGWDataChangesLog::ChangesRenewThread: start" << dendl;
+ int r = renew_entries(&dp);
if (r < 0) {
- dout(0) << "ERROR: RGWDataChangesLog::renew_entries returned error r=" << r << dendl;
+ ldpp_dout(&dp, 0) << "ERROR: RGWDataChangesLog::renew_entries returned error r=" << r << dendl;
}
if (going_down())
if (run == runs_per_prune) {
std::optional<uint64_t> through;
- dout(2) << "RGWDataChangesLog::ChangesRenewThread: pruning old generations" << dendl;
- trim_generations(through);
+ ldpp_dout(&dp, 2) << "RGWDataChangesLog::ChangesRenewThread: pruning old generations" << dendl;
+ r = trim_generations(&dp, through);
if (r < 0) {
derr << "RGWDataChangesLog::ChangesRenewThread: failed pruning r="
<< r << dendl;
} else if (through) {
- dout(2) << "RGWDataChangesLog::ChangesRenewThread: pruned generations "
+ ldpp_dout(&dp, 2) << "RGWDataChangesLog::ChangesRenewThread: pruned generations "
<< "through " << *through << "." << dendl;
} else {
- dout(2) << "RGWDataChangesLog::ChangesRenewThread: nothing to prune."
+ ldpp_dout(&dp, 2) << "RGWDataChangesLog::ChangesRenewThread: nothing to prune."
<< dendl;
}
run = 0;
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
}
-int RGWDataChangesLog::change_format(log_type type, optional_yield y) {
- return ceph::from_error_code(bes->new_backing(type, y));
+int RGWDataChangesLog::change_format(const DoutPrefixProvider *dpp, log_type type, optional_yield y) {
+ return ceph::from_error_code(bes->new_backing(dpp, type, y));
}
-int RGWDataChangesLog::trim_generations(std::optional<uint64_t>& through) {
- return bes->trim_generations(through);
+int RGWDataChangesLog::trim_generations(const DoutPrefixProvider *dpp, std::optional<uint64_t>& through) {
+ return bes->trim_generations(dpp, through);
}
--i;
return i->second;
}
- int list(int shard, int max_entries,
+ int list(const DoutPrefixProvider *dpp, int shard, int max_entries,
std::vector<rgw_data_change_log_entry>& entries,
std::optional<std::string_view> marker,
std::string* out_marker, bool* truncated);
- int trim_entries(int shard_id, std::string_view marker);
- void trim_entries(int shard_id, std::string_view marker,
+ int trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker);
+ void trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker,
librados::AioCompletion* c);
void set_zero(RGWDataChangesBE* be) {
emplace(0, be);
bs::error_code handle_new_gens(entries_t e) noexcept override;
bs::error_code handle_empty_to(uint64_t new_tail) noexcept override;
- int trim_generations(std::optional<uint64_t>& through);
+ int trim_generations(const DoutPrefixProvider *dpp, std::optional<uint64_t>& through);
};
class RGWDataChangesLog {
int choose_oid(const rgw_bucket_shard& bs);
bool going_down() const;
bool filter_bucket(const DoutPrefixProvider *dpp, const rgw_bucket& bucket, optional_yield y) const;
- int renew_entries();
+ int renew_entries(const DoutPrefixProvider *dpp);
public:
RGWDataChangesLog(CephContext* cct);
~RGWDataChangesLog();
- int start(const RGWZone* _zone, const RGWZoneParams& zoneparams,
+ int start(const DoutPrefixProvider *dpp, const RGWZone* _zone, const RGWZoneParams& zoneparams,
librados::Rados* lr);
int add_entry(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id);
int get_log_shard_id(rgw_bucket& bucket, int shard_id);
- int list_entries(int shard, int max_entries,
+ int list_entries(const DoutPrefixProvider *dpp, int shard, int max_entries,
std::vector<rgw_data_change_log_entry>& entries,
std::optional<std::string_view> marker,
std::string* out_marker, bool* truncated);
- int trim_entries(int shard_id, std::string_view marker);
- int trim_entries(int shard_id, std::string_view marker,
+ int trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker);
+ int trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker,
librados::AioCompletion* c); // :(
- int get_info(int shard_id, RGWDataChangesLogInfo *info);
+ int get_info(const DoutPrefixProvider *dpp, int shard_id, RGWDataChangesLogInfo *info);
using LogMarker = RGWDataChangesLogMarker;
- int list_entries(int max_entries,
+ int list_entries(const DoutPrefixProvider *dpp, int max_entries,
std::vector<rgw_data_change_log_entry>& entries,
LogMarker& marker, bool* ptruncated);
std::string get_oid(uint64_t gen_id, int shard_id) const;
- int change_format(log_type type, optional_yield y);
- int trim_generations(std::optional<uint64_t>& through);
+ int change_format(const DoutPrefixProvider *dpp, log_type type, optional_yield y);
+ int trim_generations(const DoutPrefixProvider *dpp, std::optional<uint64_t>& through);
};
class RGWDataChangesBE : public boost::intrusive_ref_counter<RGWDataChangesBE> {
const std::string& key,
ceph::buffer::list&& entry,
entries& out) = 0;
- virtual int push(int index, entries&& items) = 0;
- virtual int push(int index, ceph::real_time now,
+ virtual int push(const DoutPrefixProvider *dpp, int index, entries&& items) = 0;
+ virtual int push(const DoutPrefixProvider *dpp, int index, ceph::real_time now,
const std::string& key,
ceph::buffer::list&& bl) = 0;
- virtual int list(int shard, int max_entries,
+ virtual int list(const DoutPrefixProvider *dpp, int shard, int max_entries,
std::vector<rgw_data_change_log_entry>& entries,
std::optional<std::string_view> marker,
std::string* out_marker, bool* truncated) = 0;
- virtual int get_info(int index, RGWDataChangesLogInfo *info) = 0;
- virtual int trim(int index, std::string_view marker) = 0;
- virtual int trim(int index, std::string_view marker,
+ virtual int get_info(const DoutPrefixProvider *dpp, int index, RGWDataChangesLogInfo *info) = 0;
+ virtual int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker) = 0;
+ virtual int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker,
librados::AioCompletion* c) = 0;
virtual std::string_view max_marker() const = 0;
// 1 on empty, 0 on non-empty, negative on error.
- virtual int is_empty() = 0;
+ virtual int is_empty(const DoutPrefixProvider *dpp) = 0;
};
o.push_back(p);
}
-void RGWObjManifest::obj_iterator::seek(uint64_t o)
-{
- ofs = o;
- if (manifest->explicit_objs) {
- explicit_iter = manifest->objs.upper_bound(ofs);
- if (explicit_iter != manifest->objs.begin()) {
- --explicit_iter;
- }
- if (ofs < manifest->obj_size) {
- update_explicit_pos();
- } else {
- ofs = manifest->obj_size;
- }
- update_location();
- return;
- }
- if (o < manifest->get_head_size()) {
- rule_iter = manifest->rules.begin();
- stripe_ofs = 0;
- stripe_size = manifest->get_head_size();
- if (rule_iter != manifest->rules.end()) {
- cur_part_id = rule_iter->second.start_part_num;
- cur_override_prefix = rule_iter->second.override_prefix;
- }
- update_location();
- return;
- }
-
- rule_iter = manifest->rules.upper_bound(ofs);
- next_rule_iter = rule_iter;
- if (rule_iter != manifest->rules.begin()) {
- --rule_iter;
- }
-
- if (rule_iter == manifest->rules.end()) {
- update_location();
- return;
- }
-
- RGWObjManifestRule& rule = rule_iter->second;
-
- if (rule.part_size > 0) {
- cur_part_id = rule.start_part_num + (ofs - rule.start_ofs) / rule.part_size;
- } else {
- cur_part_id = rule.start_part_num;
- }
- part_ofs = rule.start_ofs + (cur_part_id - rule.start_part_num) * rule.part_size;
-
- if (rule.stripe_max_size > 0) {
- cur_stripe = (ofs - part_ofs) / rule.stripe_max_size;
-
- stripe_ofs = part_ofs + cur_stripe * rule.stripe_max_size;
- if (!cur_part_id && manifest->get_head_size() > 0) {
- cur_stripe++;
- }
- } else {
- cur_stripe = 0;
- stripe_ofs = part_ofs;
- }
-
- if (!rule.part_size) {
- stripe_size = rule.stripe_max_size;
- stripe_size = std::min(manifest->get_obj_size() - stripe_ofs, stripe_size);
- } else {
- uint64_t next = std::min(stripe_ofs + rule.stripe_max_size, part_ofs + rule.part_size);
- stripe_size = next - stripe_ofs;
- }
-
- cur_override_prefix = rule.override_prefix;
-
- update_location();
-}
-
-void RGWObjManifest::obj_iterator::update_location()
-{
- if (manifest->explicit_objs) {
- if (manifest->empty()) {
- location = rgw_obj_select{};
- } else {
- location = explicit_iter->second.loc;
- }
- return;
- }
-
- if (ofs < manifest->get_head_size()) {
- location = manifest->get_obj();
- location.set_placement_rule(manifest->get_head_placement_rule());
- return;
- }
-
- manifest->get_implicit_location(cur_part_id, cur_stripe, ofs, &cur_override_prefix, &location);
-}
-
-void RGWObjManifest::obj_iterator::update_explicit_pos()
-{
- ofs = explicit_iter->first;
- stripe_ofs = ofs;
-
- map<uint64_t, RGWObjManifestPart>::iterator next_iter = explicit_iter;
- ++next_iter;
- if (next_iter != manifest->objs.end()) {
- stripe_size = next_iter->first - ofs;
- } else {
- stripe_size = manifest->obj_size - ofs;
- }
-}
-
void RGWObjManifest::generate_test_instances(std::list<RGWObjManifest*>& o)
{
RGWObjManifest *m = new RGWObjManifest;
o.push_back(new RGWObjManifest);
}
-void RGWObjManifest::get_implicit_location(uint64_t cur_part_id, uint64_t cur_stripe, uint64_t ofs, string *override_prefix, rgw_obj_select *location)
-{
- rgw_obj loc;
-
- string& oid = loc.key.name;
- string& ns = loc.key.ns;
-
- if (!override_prefix || override_prefix->empty()) {
- oid = prefix;
- } else {
- oid = *override_prefix;
- }
-
- if (!cur_part_id) {
- if (ofs < max_head_size) {
- location->set_placement_rule(head_placement_rule);
- *location = obj;
- return;
- } else {
- char buf[16];
- snprintf(buf, sizeof(buf), "%d", (int)cur_stripe);
- oid += buf;
- ns = shadow_ns;
- }
- } else {
- char buf[32];
- if (cur_stripe == 0) {
- snprintf(buf, sizeof(buf), ".%d", (int)cur_part_id);
- oid += buf;
- ns= RGW_OBJ_NS_MULTIPART;
- } else {
- snprintf(buf, sizeof(buf), ".%d_%d", (int)cur_part_id, (int)cur_stripe);
- oid += buf;
- ns = shadow_ns;
- }
- }
-
- if (!tail_placement.bucket.name.empty()) {
- loc.bucket = tail_placement.bucket;
- } else {
- loc.bucket = obj.bucket;
- }
-
- // Always overwrite instance with tail_instance
- // to get the right shadow object location
- loc.key.set_instance(tail_instance);
-
- location->set_placement_rule(tail_placement.placement_rule);
- *location = loc;
-}
-
-
void rgw_log_entry::generate_test_instances(list<rgw_log_entry*>& o)
{
namespace rgw::putobj {
-int create_etag_verifier(CephContext* cct, DataProcessor* filter,
+int create_etag_verifier(const DoutPrefixProvider *dpp,
+ CephContext* cct, DataProcessor* filter,
const bufferlist& manifest_bl,
const std::optional<RGWCompressionInfo>& compression,
etag_verifier_ptr& verifier)
auto miter = manifest_bl.cbegin();
decode(manifest, miter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: couldn't decode manifest" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: couldn't decode manifest" << dendl;
return -EIO;
}
RGWObjManifestRule rule;
bool found = manifest.get_rule(0, &rule);
if (!found) {
- lderr(cct) << "ERROR: manifest->get_rule() could not find rule" << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: manifest->get_rule() could not find rule" << dendl;
return -EIO;
}
* MPU part. These part ETags then become the input for the MPU object
* Etag.
*/
- for (auto mi = manifest.obj_begin(); mi != manifest.obj_end(); ++mi) {
+ for (auto mi = manifest.obj_begin(dpp); mi != manifest.obj_end(dpp); ++mi) {
if (cur_part_ofs == mi.get_part_ofs())
continue;
cur_part_ofs = mi.get_part_ofs();
- ldout(cct, 20) << "MPU Part offset:" << cur_part_ofs << dendl;
+ ldpp_dout(dpp, 20) << "MPU Part offset:" << cur_part_ofs << dendl;
part_ofs.push_back(cur_part_ofs);
}
};
block = std::lower_bound(block, blocks.end(), ofs, less);
if (block == blocks.end() || block->new_ofs != ofs) {
- ldout(cct, 4) << "no match for compressed offset " << ofs
+ ldpp_dout(dpp, 4) << "no match for compressed offset " << ofs
<< ", disabling etag verification" << dendl;
return -EIO;
}
ofs = block->old_ofs;
- ldout(cct, 20) << "MPU Part uncompressed offset:" << ofs << dendl;
+ ldpp_dout(dpp, 20) << "MPU Part uncompressed offset:" << ofs << dendl;
}
}
);
using etag_verifier_ptr = ceph::static_ptr<ETagVerifier, max_etag_verifier_size>;
-int create_etag_verifier(CephContext* cct, DataProcessor* next,
+int create_etag_verifier(const DoutPrefixProvider *dpp,
+ CephContext* cct, DataProcessor* next,
const bufferlist& manifest_bl,
const std::optional<RGWCompressionInfo>& compression,
etag_verifier_ptr& verifier);
attrbl.append(val.c_str(), val.size() + 1);
}
- op_ret = rgw_get_request_metadata(state->cct, state->info, attrs);
+ op_ret = rgw_get_request_metadata(this, state->cct, state->info, attrs);
if (op_ret < 0) {
goto done;
}
op.create(false);
const uint64_t queue_size = cct->_conf->rgw_gc_max_queue_size, num_deferred_entries = cct->_conf->rgw_gc_max_deferred;
gc_log_init2(op, queue_size, num_deferred_entries);
- store->gc_operate(obj_names[i], &op);
+ store->gc_operate(this, obj_names[i], &op);
}
}
ldpp_dout(this, 20) << "RGWGC::send_chain - on object name: " << obj_names[i] << "tag is: " << tag << dendl;
- auto ret = store->gc_operate(obj_names[i], &op);
+ auto ret = store->gc_operate(this, obj_names[i], &op);
if (ret != -ECANCELED && ret != -EPERM) {
return ret;
}
ObjectWriteOperation set_entry_op;
cls_rgw_gc_set_entry(set_entry_op, cct->_conf->rgw_gc_obj_min_wait, info);
- return store->gc_operate(obj_names[i], &set_entry_op);
+ return store->gc_operate(this, obj_names[i], &set_entry_op);
}
struct defer_chain_state {
ObjectWriteOperation op;
cls_rgw_gc_queue_remove_entries(op, num_entries);
- return store->gc_operate(obj_names[index], &op);
+ return store->gc_operate(this, obj_names[index], &op);
}
int RGWGC::list(int *index, string& marker, uint32_t max, bool expired_only, std::list<cls_rgw_gc_obj_info>& result, bool *truncated, bool& processing_queue)
if (obj.pool != last_pool) {
delete ctx;
ctx = new IoCtx;
- ret = rgw_init_ioctx(store->get_rados_handle(), obj.pool, *ctx);
+ ret = rgw_init_ioctx(this, store->get_rados_handle(), obj.pool, *ctx);
if (ret < 0) {
if (transitioned_objects_cache[index]) {
goto done;
::encode_json("tail_instance", tail_instance, f);
::encode_json("tail_placement", tail_placement, f);
- f->dump_object("begin_iter", begin_iter);
- f->dump_object("end_iter", end_iter);
+ // nullptr being passed into iterators since there
+ // is no cct and we aren't doing anything with these
+ // iterators that would write to the log
+ f->dump_object("begin_iter", obj_begin(nullptr));
+ f->dump_object("end_iter", obj_end(nullptr));
}
void rgw_log_entry::dump(Formatter *f) const
<< dendl;
do {
- int ret = lock->try_lock(lock_duration, null_yield);
+ int ret = lock->try_lock(this, lock_duration, null_yield);
if (ret == -EBUSY || ret == -EEXIST) {
/* already locked by another lc processor */
ldpp_dout(this, 0) << "RGWLC::bucket_lc_post() failed to acquire lock on "
utime_t time(max_lock_secs, 0);
- int ret = lock->try_lock(time, null_yield);
+ int ret = lock->try_lock(this, time, null_yield);
if (ret == -EBUSY || ret == -EEXIST) {
/* already locked by another lc processor */
ldpp_dout(this, 0) << "RGWLC::process() failed to acquire lock on "
}
template<typename F>
-static int guard_lc_modify(rgw::sal::Store* store,
+static int guard_lc_modify(const DoutPrefixProvider *dpp,
+ rgw::sal::Store* store,
rgw::sal::Lifecycle* sal_lc,
const rgw_bucket& bucket, const string& cookie,
const F& f) {
int ret;
do {
- ret = lock->try_lock(time, null_yield);
+ ret = lock->try_lock(dpp, time, null_yield);
if (ret == -EBUSY || ret == -EEXIST) {
- ldout(cct, 0) << "RGWLC::RGWPutLC() failed to acquire lock on "
+ ldpp_dout(dpp, 0) << "RGWLC::RGWPutLC() failed to acquire lock on "
<< oid << ", sleep 5, try again" << dendl;
sleep(5); // XXX: return retryable error
continue;
}
if (ret < 0) {
- ldout(cct, 0) << "RGWLC::RGWPutLC() failed to acquire lock on "
+ ldpp_dout(dpp, 0) << "RGWLC::RGWPutLC() failed to acquire lock on "
<< oid << ", ret=" << ret << dendl;
break;
}
ret = f(sal_lc, oid, entry);
if (ret < 0) {
- ldout(cct, 0) << "RGWLC::RGWPutLC() failed to set entry on "
+ ldpp_dout(dpp, 0) << "RGWLC::RGWPutLC() failed to set entry on "
<< oid << ", ret=" << ret << dendl;
}
break;
rgw_bucket& b = bucket->get_key();
- ret = guard_lc_modify(store, sal_lc.get(), b, cookie,
+ ret = guard_lc_modify(this, store, sal_lc.get(), b, cookie,
[&](rgw::sal::Lifecycle* sal_lc, const string& oid,
const rgw::sal::Lifecycle::LCEntry& entry) {
return sal_lc->set_entry(oid, entry);
rgw_bucket& b = bucket->get_key();
if (ret < 0) {
- ldout(cct, 0) << "RGWLC::RGWDeleteLC() failed to set attrs on bucket="
+ ldpp_dout(this, 0) << "RGWLC::RGWDeleteLC() failed to set attrs on bucket="
<< b.name << " returned err=" << ret << dendl;
return ret;
}
- ret = guard_lc_modify(store, sal_lc.get(), b, cookie,
+ ret = guard_lc_modify(this, store, sal_lc.get(), b, cookie,
[&](rgw::sal::Lifecycle* sal_lc, const string& oid,
const rgw::sal::Lifecycle::LCEntry& entry) {
return sal_lc->rm_entry(oid, entry);
namespace rgw::lc {
-int fix_lc_shard_entry(rgw::sal::Store* store,
+int fix_lc_shard_entry(const DoutPrefixProvider *dpp,
+ rgw::sal::Store* store,
rgw::sal::Lifecycle* sal_lc,
rgw::sal::Bucket* bucket)
{
// We are not dropping the old marker here as that would be caught by the next LC process update
int ret = sal_lc->get_entry(lc_oid, shard_name, entry);
if (ret == 0) {
- ldout(store->ctx(), 5) << "Entry already exists, nothing to do" << dendl;
+ ldpp_dout(dpp, 5) << "Entry already exists, nothing to do" << dendl;
return ret; // entry is already existing correctly set to marker
}
- ldout(store->ctx(), 5) << "lc_get_entry errored ret code=" << ret << dendl;
+ ldpp_dout(dpp, 5) << "lc_get_entry errored ret code=" << ret << dendl;
if (ret == -ENOENT) {
- ldout(store->ctx(), 1) << "No entry for bucket=" << bucket
+ ldpp_dout(dpp, 1) << "No entry for bucket=" << bucket
<< " creating " << dendl;
// TODO: we have too many ppl making cookies like this!
char cookie_buf[COOKIE_LEN + 1];
gen_rand_alphanumeric(store->ctx(), cookie_buf, sizeof(cookie_buf) - 1);
std::string cookie = cookie_buf;
- ret = guard_lc_modify(
+ ret = guard_lc_modify(dpp,
store, sal_lc, bucket->get_key(), cookie,
[&lc_oid](rgw::sal::Lifecycle* slc,
const string& oid,
namespace rgw::lc {
-int fix_lc_shard_entry(rgw::sal::Store* store,
+int fix_lc_shard_entry(const DoutPrefixProvider *dpp,
+ rgw::sal::Store* store,
rgw::sal::Lifecycle* sal_lc,
rgw::sal::Bucket* bucket);
#include "services/svc_zone_utils.h"
#include "include/ceph_assert.h"
+#define dout_subsys ceph_subsys_rgw
+
class OpsLogSocket;
namespace rgw {
class RGWLibFrontend;
- class RGWLib {
+ class RGWLib : public DoutPrefixProvider {
RGWFrontendConfig* fec;
RGWLibFrontend* fe;
OpsLogSocket* olog;
rgw::LDAPHelper* get_ldh() { return ldh; }
+ CephContext *get_cct() const override { return cct.get(); }
+ unsigned get_subsys() const override { return dout_subsys; }
+ std::ostream& gen_prefix(std::ostream& out) const override { return out << "lib rgw: "; }
+
int init();
int init(vector<const char *>& args);
int stop();
} /* enqueue_req */
/* "regular" requests */
- void handle_request(RGWRequest* req) override; // async handler, deletes req
+ void handle_request(const DoutPrefixProvider *dpp, RGWRequest* req) override; // async handler, deletes req
int process_request(RGWLibRequest* req);
int process_request(RGWLibRequest* req, RGWLibIO* io);
void set_access_key(RGWAccessKey& key) { access_key = key; }
date_str = rgw_to_asctime(tm);
}
-int RGWLoadGenRequestEnv::sign(RGWAccessKey& access_key)
+int RGWLoadGenRequestEnv::sign(const DoutPrefixProvider *dpp, RGWAccessKey& access_key)
{
meta_map_t meta_map;
map<string, string> sub_resources;
string canonical_header;
string digest;
- rgw_create_s3_canonical_header(request_method.c_str(),
+ rgw_create_s3_canonical_header(dpp,
+ request_method.c_str(),
nullptr, /* const char *content_md5 */
content_type.c_str(),
date_str.c_str(),
}
void set_date(utime_t& tm);
- int sign(RGWAccessKey& access_key);
+ int sign(const DoutPrefixProvider *dpp, RGWAccessKey& access_key);
};
/* XXX does RGWLoadGenIO actually want to perform stream/HTTP I/O,
req_wq.queue(req);
} /* RGWLoadGenProcess::gen_request */
-void RGWLoadGenProcess::handle_request(RGWRequest* r)
+void RGWLoadGenProcess::handle_request(const DoutPrefixProvider *dpp, RGWRequest* r)
{
RGWLoadGenRequest* req = static_cast<RGWLoadGenRequest*>(r);
env.request_method = req->method;
env.uri = req->resource;
env.set_date(tm);
- env.sign(access_key);
+ env.sign(dpp, access_key);
RGWLoadGenIO real_client_io(&env);
RGWRestfulIO client_io(cct, &real_client_io);
}
/* usage logger */
-class UsageLogger {
+class UsageLogger : public DoutPrefixProvider {
CephContext *cct;
rgw::sal::Store* store;
map<rgw_user_bucket, RGWUsageBatch> usage_map;
num_entries = 0;
lock.unlock();
- store->log_usage(old_map);
+ store->log_usage(this, old_map);
}
+
+ CephContext *get_cct() const override { return cct; }
+ unsigned get_subsys() const override { return dout_subsys; }
+ std::ostream& gen_prefix(std::ostream& out) const override { return out << "rgw UsageLogger: "; }
};
static UsageLogger *usage_logger = NULL;
if (!s->is_err())
data.successful_ops = 1;
- ldout(s->cct, 30) << "log_usage: bucket_name=" << bucket_name
+ ldpp_dout(s, 30) << "log_usage: bucket_name=" << bucket_name
<< " tenant=" << s->bucket_tenant
<< ", bytes_sent=" << bytes_sent << ", bytes_received="
<< bytes_received << ", success=" << data.successful_ops << dendl;
return 0;
if (s->bucket_name.empty()) {
- ldout(s->cct, 5) << "nothing to log for operation" << dendl;
+ ldpp_dout(s, 5) << "nothing to log for operation" << dendl;
return -EINVAL;
}
if (s->err.ret == -ERR_NO_SUCH_BUCKET || rgw::sal::Bucket::empty(s->bucket.get())) {
if (!s->cct->_conf->rgw_log_nonexistent_bucket) {
- ldout(s->cct, 5) << "bucket " << s->bucket_name << " doesn't exist, not logging" << dendl;
+ ldpp_dout(s, 5) << "bucket " << s->bucket_name << " doesn't exist, not logging" << dendl;
return 0;
}
bucket_id = "";
entry.bucket = rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name);
if (check_utf8(entry.bucket.c_str(), entry.bucket.size()) != 0) {
- ldout(s->cct, 5) << "not logging op on bucket with non-utf8 name" << dendl;
+ ldpp_dout(s, 5) << "not logging op on bucket with non-utf8 name" << dendl;
return 0;
}
if (s->cct->_conf->rgw_ops_log_rados) {
string oid = render_log_object_name(s->cct->_conf->rgw_log_object_name, &bdt,
entry.bucket_id, entry.bucket);
- ret = store->log_op(oid, bl);
+ ret = store->log_op(s, oid, bl);
}
if (olog) {
olog->log(entry);
}
if (ret < 0)
- ldout(s->cct, 0) << "ERROR: failed to log entry" << dendl;
+ ldpp_dout(s, 0) << "ERROR: failed to log entry" << dendl;
return ret;
}
namespace {
/// Return the shard type, and a bool to see whether it has entries.
std::pair<shard_check, bool>
-probe_shard(librados::IoCtx& ioctx, const std::string& oid,
+probe_shard(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid,
bool& fifo_unsupported, optional_yield y)
{
- auto cct = static_cast<CephContext*>(ioctx.cct());
bool omap = false;
{
librados::ObjectReadOperation op;
cls_log_header header;
cls_log_info(op, &header);
- auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, y);
+ auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y);
if (r == -ENOENT) {
return { shard_check::dne, {} };
}
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " error probing for omap: r=" << r
<< ", oid=" << oid << dendl;
return { shard_check::corrupt, {} };
}
if (!fifo_unsupported) {
std::unique_ptr<rgw::cls::fifo::FIFO> fifo;
- auto r = rgw::cls::fifo::FIFO::open(ioctx, oid,
+ auto r = rgw::cls::fifo::FIFO::open(dpp, ioctx, oid,
&fifo, y,
std::nullopt, true);
if (r < 0 && !(r == -ENOENT || r == -ENODATA || r == -EPERM)) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " error probing for fifo: r=" << r
<< ", oid=" << oid << dendl;
return { shard_check::corrupt, {} };
}
if (fifo && omap) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " fifo and omap found: oid=" << oid << dendl;
return { shard_check::corrupt, {} };
}
if (fifo) {
bool more = false;
std::vector<rgw::cls::fifo::list_entry> entries;
- r = fifo->list(1, nullopt, &entries, &more, y);
+ r = fifo->list(dpp, 1, nullopt, &entries, &more, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": unable to list entries: r=" << r
<< ", oid=" << oid << dendl;
return { shard_check::corrupt, {} };
librados::ObjectReadOperation op;
cls_log_list(op, {}, {}, {}, 1, entries,
&out_marker, &truncated);
- auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, y);
+ auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed to list: r=" << r << ", oid=" << oid << dendl;
return { shard_check::corrupt, {} };
}
}
tl::expected<log_type, bs::error_code>
-handle_dne(librados::IoCtx& ioctx,
+handle_dne(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx,
log_type def,
std::string oid,
bool fifo_unsupported,
optional_yield y)
{
- auto cct = static_cast<CephContext*>(ioctx.cct());
if (def == log_type::fifo) {
if (fifo_unsupported) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " WARNING: FIFO set as default but not supported by OSD. "
<< "Falling back to OMAP." << dendl;
return log_type::omap;
}
std::unique_ptr<rgw::cls::fifo::FIFO> fifo;
- auto r = rgw::cls::fifo::FIFO::create(ioctx, oid,
+ auto r = rgw::cls::fifo::FIFO::create(dpp, ioctx, oid,
&fifo, y,
std::nullopt);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " error creating FIFO: r=" << r
<< ", oid=" << oid << dendl;
return tl::unexpected(bs::error_code(-r, bs::system_category()));
}
tl::expected<log_type, bs::error_code>
-log_backing_type(librados::IoCtx& ioctx,
+log_backing_type(const DoutPrefixProvider *dpp,
+ librados::IoCtx& ioctx,
log_type def,
int shards,
const fu2::unique_function<std::string(int) const>& get_oid,
optional_yield y)
{
- auto cct = static_cast<CephContext*>(ioctx.cct());
auto check = shard_check::dne;
bool fifo_unsupported = false;
for (int i = 0; i < shards; ++i) {
- auto [c, e] = probe_shard(ioctx, get_oid(i), fifo_unsupported, y);
+ auto [c, e] = probe_shard(dpp, ioctx, get_oid(i), fifo_unsupported, y);
if (c == shard_check::corrupt)
return tl::unexpected(bs::error_code(EIO, bs::system_category()));
if (c == shard_check::dne) continue;
}
if (check != c) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " clashing types: check=" << check
<< ", c=" << c << dendl;
return tl::unexpected(bs::error_code(EIO, bs::system_category()));
}
}
if (check == shard_check::corrupt) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " should be unreachable!" << dendl;
return tl::unexpected(bs::error_code(EIO, bs::system_category()));
}
if (check == shard_check::dne)
- return handle_dne(ioctx,
+ return handle_dne(dpp, ioctx,
def,
get_oid(0),
fifo_unsupported,
return (check == shard_check::fifo ? log_type::fifo : log_type::omap);
}
-bs::error_code log_remove(librados::IoCtx& ioctx,
+bs::error_code log_remove(const DoutPrefixProvider *dpp,
+ librados::IoCtx& ioctx,
int shards,
const fu2::unique_function<std::string(int) const>& get_oid,
bool leave_zero,
optional_yield y)
{
bs::error_code ec;
- auto cct = static_cast<CephContext*>(ioctx.cct());
for (int i = 0; i < shards; ++i) {
auto oid = get_oid(i);
rados::cls::fifo::info info;
uint32_t part_header_size = 0, part_entry_overhead = 0;
- auto r = rgw::cls::fifo::get_meta(ioctx, oid, nullopt, &info,
+ auto r = rgw::cls::fifo::get_meta(dpp, ioctx, oid, nullopt, &info,
&part_header_size, &part_entry_overhead,
0, y, true);
if (r == -ENOENT) continue;
librados::ObjectWriteOperation op;
op.remove();
auto part_oid = info.part_oid(j);
- auto subr = rgw_rados_operate(ioctx, part_oid, &op, null_yield);
+ auto subr = rgw_rados_operate(dpp, ioctx, part_oid, &op, null_yield);
if (subr < 0 && subr != -ENOENT) {
if (!ec)
ec = bs::error_code(-subr, bs::system_category());
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed removing FIFO part: part_oid=" << part_oid
<< ", subr=" << subr << dendl;
}
if (r < 0 && r != -ENODATA) {
if (!ec)
ec = bs::error_code(-r, bs::system_category());
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed checking FIFO part: oid=" << oid
<< ", r=" << r << dendl;
}
} else {
op.remove();
}
- r = rgw_rados_operate(ioctx, oid, &op, null_yield);
+ r = rgw_rados_operate(dpp, ioctx, oid, &op, null_yield);
if (r < 0 && r != -ENOENT) {
if (!ec)
ec = bs::error_code(-r, bs::system_category());
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed removing shard: oid=" << oid
<< ", r=" << r << dendl;
}
}
}
-bs::error_code logback_generations::setup(log_type def,
+bs::error_code logback_generations::setup(const DoutPrefixProvider *dpp,
+ log_type def,
optional_yield y) noexcept
{
try {
- auto cct = static_cast<CephContext*>(ioctx.cct());
// First, read.
- auto res = read(y);
+ auto cct = static_cast<CephContext*>(ioctx.cct());
+ auto res = read(dpp, y);
if (!res && res.error() != bs::errc::no_such_file_or_directory) {
return res.error();
}
// Are we the first? Then create generation 0 and the generations
// metadata.
librados::ObjectWriteOperation op;
- auto type = log_backing_type(ioctx, def, shards,
+ auto type = log_backing_type(dpp, ioctx, def, shards,
[this](int shard) {
return this->get_oid(0, shard);
}, y);
lock.unlock();
op.write_full(bl);
- auto r = rgw_rados_operate(ioctx, oid, &op, y);
+ auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y);
if (r < 0 && r != -EEXIST) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed writing oid=" << oid
<< ", r=" << r << dendl;
bs::system_error(-r, bs::system_category());
}
// Did someone race us? Then re-read.
if (r != 0) {
- res = read(y);
+ res = read(dpp, y);
if (!res)
return res.error();
if (res->first.empty())
// generation zero, incremented, then erased generation zero,
// don't leave generation zero lying around.
if (l.gen_id != 0) {
- auto ec = log_remove(ioctx, shards,
+ auto ec = log_remove(dpp, ioctx, shards,
[this](int shard) {
return this->get_oid(0, shard);
}, true, y);
m.unlock();
auto ec = watch();
if (ec) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed to re-establish watch, unsafe to continue: oid="
<< oid << ", ec=" << ec.message() << dendl;
}
}
}
-bs::error_code logback_generations::update(optional_yield y) noexcept
+bs::error_code logback_generations::update(const DoutPrefixProvider *dpp, optional_yield y) noexcept
{
try {
- auto cct = static_cast<CephContext*>(ioctx.cct());
- auto res = read(y);
+ auto res = read(dpp, y);
if (!res) {
return res.error();
}
// Check consistency and prepare update
if (es.empty()) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": INCONSISTENCY! Read empty update." << dendl;
return bs::error_code(EFAULT, bs::system_category());
}
assert(cur_lowest != entries_.cend());
auto new_lowest = lowest_nomempty(es);
if (new_lowest == es.cend()) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": INCONSISTENCY! Read update with no active head." << dendl;
return bs::error_code(EFAULT, bs::system_category());
}
if (new_lowest->first < cur_lowest->first) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": INCONSISTENCY! Tail moved wrong way." << dendl;
return bs::error_code(EFAULT, bs::system_category());
}
entries_t new_entries;
if ((es.end() - 1)->first < (entries_.end() - 1)->first) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": INCONSISTENCY! Head moved wrong way." << dendl;
return bs::error_code(EFAULT, bs::system_category());
}
return {};
}
-auto logback_generations::read(optional_yield y) noexcept ->
+auto logback_generations::read(const DoutPrefixProvider *dpp, optional_yield y) noexcept ->
tl::expected<std::pair<entries_t, obj_version>, bs::error_code>
{
try {
- auto cct = static_cast<CephContext*>(ioctx.cct());
librados::ObjectReadOperation op;
std::unique_lock l(m);
cls_version_check(op, version, VER_COND_GE);
cls_version_read(op, &v2);
cb::list bl;
op.read(0, 0, &bl, nullptr);
- auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, y);
+ auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y);
if (r < 0) {
if (r == -ENOENT) {
- ldout(cct, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": oid=" << oid
<< " not found" << dendl;
} else {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed reading oid=" << oid
<< ", r=" << r << dendl;
}
}
}
-bs::error_code logback_generations::write(entries_t&& e,
+bs::error_code logback_generations::write(const DoutPrefixProvider *dpp, entries_t&& e,
std::unique_lock<std::mutex>&& l_,
optional_yield y) noexcept
{
ceph_assert(l.mutex() == &m &&
l.owns_lock());
try {
- auto cct = static_cast<CephContext*>(ioctx.cct());
librados::ObjectWriteOperation op;
cls_version_check(op, version, VER_COND_GE);
cb::list bl;
encode(e, bl);
op.write_full(bl);
cls_version_inc(op);
- auto r = rgw_rados_operate(ioctx, oid, &op, y);
+ auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y);
if (r == 0) {
entries_ = std::move(e);
version.inc();
}
l.unlock();
if (r < 0 && r != -ECANCELED) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed reading oid=" << oid
<< ", r=" << r << dendl;
return { -r, bs::system_category() };
}
if (r == -ECANCELED) {
- auto ec = update(y);
+ auto ec = update(dpp, y);
if (ec) {
return ec;
} else {
return {};
}
-bs::error_code logback_generations::new_backing(log_type type,
+bs::error_code logback_generations::new_backing(const DoutPrefixProvider *dpp,
+ log_type type,
optional_yield y) noexcept {
- auto cct = static_cast<CephContext*>(ioctx.cct());
static constexpr auto max_tries = 10;
try {
- auto ec = update(y);
+ auto ec = update(dpp, y);
if (ec) return ec;
auto tries = 0;
entries_t new_entries;
new_entries.emplace(newgenid, newgen);
auto es = entries_;
es.emplace(newgenid, std::move(newgen));
- ec = write(std::move(es), std::move(l), y);
+ ec = write(dpp, std::move(es), std::move(l), y);
++tries;
} while (ec == bs::errc::operation_canceled &&
tries < max_tries);
if (tries >= max_tries) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": exhausted retry attempts." << dendl;
return ec;
}
if (ec) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": write failed with ec=" << ec.message() << dendl;
return ec;
}
cb::list bl, rbl;
- auto r = rgw_rados_notify(ioctx, oid, bl, 10'000, &rbl, y);
+ auto r = rgw_rados_notify(dpp, ioctx, oid, bl, 10'000, &rbl, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": notify failed with r=" << r << dendl;
return { -r, bs::system_category() };
}
return {};
}
-bs::error_code logback_generations::empty_to(uint64_t gen_id,
+bs::error_code logback_generations::empty_to(const DoutPrefixProvider *dpp,
+ uint64_t gen_id,
optional_yield y) noexcept {
- auto cct = static_cast<CephContext*>(ioctx.cct());
static constexpr auto max_tries = 10;
try {
- auto ec = update(y);
+ auto ec = update(dpp, y);
if (ec) return ec;
auto tries = 0;
uint64_t newtail = 0;
{
auto last = entries_.end() - 1;
if (gen_id >= last->first) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": Attempt to trim beyond the possible." << dendl;
return bs::error_code(EINVAL, bs::system_category());
}
newtail = i->first;
i->second.pruned = ceph::real_clock::now();
}
- ec = write(std::move(es), std::move(l), y);
+ ec = write(dpp, std::move(es), std::move(l), y);
++tries;
} while (ec == bs::errc::operation_canceled &&
tries < max_tries);
if (tries >= max_tries) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": exhausted retry attempts." << dendl;
return ec;
}
if (ec) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": write failed with ec=" << ec.message() << dendl;
return ec;
}
cb::list bl, rbl;
- auto r = rgw_rados_notify(ioctx, oid, bl, 10'000, &rbl, y);
+ auto r = rgw_rados_notify(dpp, ioctx, oid, bl, 10'000, &rbl, y);
if (r < 0) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": notify failed with r=" << r << dendl;
return { -r, bs::system_category() };
}
return {};
}
-bs::error_code logback_generations::remove_empty(optional_yield y) noexcept {
- auto cct = static_cast<CephContext*>(ioctx.cct());
+bs::error_code logback_generations::remove_empty(const DoutPrefixProvider *dpp, optional_yield y) noexcept {
static constexpr auto max_tries = 10;
try {
- auto ec = update(y);
+ auto ec = update(dpp, y);
if (ec) return ec;
auto tries = 0;
entries_t new_entries;
auto es2 = entries_;
for (const auto& [gen_id, e] : es) {
ceph_assert(e.pruned);
- auto ec = log_remove(ioctx, shards,
+ auto ec = log_remove(dpp, ioctx, shards,
[this, gen_id=gen_id](int shard) {
return this->get_oid(gen_id, shard);
}, (gen_id == 0), y);
if (ec) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": Error pruning: gen_id=" << gen_id
<< " ec=" << ec.message() << dendl;
}
}
l.lock();
es.clear();
- ec = write(std::move(es2), std::move(l), y);
+ ec = write(dpp, std::move(es2), std::move(l), y);
++tries;
} while (ec == bs::errc::operation_canceled &&
tries < max_tries);
if (tries >= max_tries) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": exhausted retry attempts." << dendl;
return ec;
}
if (ec) {
- lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
+ ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": write failed with ec=" << ec.message() << dendl;
return ec;
}
bufferlist& bl)
{
auto cct = static_cast<CephContext*>(ioctx.cct());
+ const DoutPrefix dp(cct, dout_subsys, "logback generations handle_notify: ");
if (notifier_id != my_id) {
- auto ec = update(null_yield);
+ auto ec = update(&dp, null_yield);
if (ec) {
lderr(cct)
<< __PRETTY_FUNCTION__ << ":" << __LINE__
/// Look over the shards in a log and determine the type.
tl::expected<log_type, bs::error_code>
-log_backing_type(librados::IoCtx& ioctx,
+log_backing_type(const DoutPrefixProvider *dpp,
+ librados::IoCtx& ioctx,
log_type def,
int shards, //< Total number of shards
/// A function taking a shard number and
entries_t entries_;
tl::expected<std::pair<entries_t, obj_version>, bs::error_code>
- read(optional_yield y) noexcept;
- bs::error_code write(entries_t&& e, std::unique_lock<std::mutex>&& l_,
+ read(const DoutPrefixProvider *dpp, optional_yield y) noexcept;
+ bs::error_code write(const DoutPrefixProvider *dpp, entries_t&& e, std::unique_lock<std::mutex>&& l_,
optional_yield y) noexcept;
- bs::error_code setup(log_type def, optional_yield y) noexcept;
+ bs::error_code setup(const DoutPrefixProvider *dpp, log_type def, optional_yield y) noexcept;
bs::error_code watch() noexcept;
template<typename T, typename... Args>
static tl::expected<std::unique_ptr<T>, bs::error_code>
- init(librados::IoCtx& ioctx_, std::string oid_,
+ init(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx_, std::string oid_,
fu2::unique_function<std::string(uint64_t, int) const>&& get_oid_,
int shards_, log_type def, optional_yield y,
Args&& ...args) noexcept {
shards_, std::forward<Args>(args)...);
std::unique_ptr<T> lg(lgp);
lgp = nullptr;
- auto ec = lg->setup(def, y);
+ auto ec = lg->setup(dpp, def, y);
if (ec)
return tl::unexpected(ec);
// Obnoxiousness for C++ Compiler in Bionic Beaver
}
}
- bs::error_code update(optional_yield y) noexcept;
+ bs::error_code update(const DoutPrefixProvider *dpp, optional_yield y) noexcept;
entries_t entries() const {
return entries_;
}
- bs::error_code new_backing(log_type type, optional_yield y) noexcept;
+ bs::error_code new_backing(const DoutPrefixProvider *dpp, log_type type, optional_yield y) noexcept;
- bs::error_code empty_to(uint64_t gen_id, optional_yield y) noexcept;
+ bs::error_code empty_to(const DoutPrefixProvider *dpp, uint64_t gen_id, optional_yield y) noexcept;
- bs::error_code remove_empty(optional_yield y) noexcept;
+ bs::error_code remove_empty(const DoutPrefixProvider *dpp, optional_yield y) noexcept;
// Callbacks, to be defined by descendant.
std::mutex m;
std::unique_ptr<rgw::cls::fifo::FIFO> fifo;
- int lazy_init(optional_yield y) {
+ int lazy_init(const DoutPrefixProvider *dpp, optional_yield y) {
std::unique_lock l(m);
if (fifo) return 0;
- auto r = rgw::cls::fifo::FIFO::create(ioctx, oid, &fifo, y);
+ auto r = rgw::cls::fifo::FIFO::create(dpp, ioctx, oid, &fifo, y);
if (r) {
fifo.reset();
}
LazyFIFO(librados::IoCtx& ioctx, std::string oid)
: ioctx(ioctx), oid(std::move(oid)) {}
- int read_meta(optional_yield y) {
- auto r = lazy_init(y);
+ int read_meta(const DoutPrefixProvider *dpp, optional_yield y) {
+ auto r = lazy_init(dpp, y);
if (r < 0) return r;
- return fifo->read_meta(y);
+ return fifo->read_meta(dpp, y);
}
- int meta(rados::cls::fifo::info& info, optional_yield y) {
- auto r = lazy_init(y);
+ int meta(const DoutPrefixProvider *dpp, rados::cls::fifo::info& info, optional_yield y) {
+ auto r = lazy_init(dpp, y);
if (r < 0) return r;
info = fifo->meta();
return 0;
}
- int get_part_layout_info(std::uint32_t& part_header_size,
+ int get_part_layout_info(const DoutPrefixProvider *dpp,
+ std::uint32_t& part_header_size,
std::uint32_t& part_entry_overhead,
optional_yield y) {
- auto r = lazy_init(y);
+ auto r = lazy_init(dpp, y);
if (r < 0) return r;
std::tie(part_header_size, part_entry_overhead)
= fifo->get_part_layout_info();
return 0;
}
- int push(const ceph::buffer::list& bl,
+ int push(const DoutPrefixProvider *dpp,
+ const ceph::buffer::list& bl,
optional_yield y) {
- auto r = lazy_init(y);
+ auto r = lazy_init(dpp, y);
if (r < 0) return r;
- return fifo->push(bl, y);
+ return fifo->push(dpp, bl, y);
}
- int push(ceph::buffer::list& bl,
+ int push(const DoutPrefixProvider *dpp,
+ ceph::buffer::list& bl,
librados::AioCompletion* c,
optional_yield y) {
- auto r = lazy_init(y);
+ auto r = lazy_init(dpp, y);
if (r < 0) return r;
- fifo->push(bl, c);
+ fifo->push(dpp, bl, c);
return 0;
}
- int push(const std::vector<ceph::buffer::list>& data_bufs,
+ int push(const DoutPrefixProvider *dpp,
+ const std::vector<ceph::buffer::list>& data_bufs,
optional_yield y) {
- auto r = lazy_init(y);
+ auto r = lazy_init(dpp, y);
if (r < 0) return r;
- return fifo->push(data_bufs, y);
+ return fifo->push(dpp, data_bufs, y);
}
- int push(const std::vector<ceph::buffer::list>& data_bufs,
+ int push(const DoutPrefixProvider *dpp,
+ const std::vector<ceph::buffer::list>& data_bufs,
librados::AioCompletion* c,
optional_yield y) {
- auto r = lazy_init(y);
+ auto r = lazy_init(dpp, y);
if (r < 0) return r;
- fifo->push(data_bufs, c);
+ fifo->push(dpp, data_bufs, c);
return 0;
}
- int list(int max_entries, std::optional<std::string_view> markstr,
+ int list(const DoutPrefixProvider *dpp,
+ int max_entries, std::optional<std::string_view> markstr,
std::vector<rgw::cls::fifo::list_entry>* out,
bool* more, optional_yield y) {
- auto r = lazy_init(y);
+ auto r = lazy_init(dpp, y);
if (r < 0) return r;
- return fifo->list(max_entries, markstr, out, more, y);
+ return fifo->list(dpp, max_entries, markstr, out, more, y);
}
- int list(int max_entries, std::optional<std::string_view> markstr,
+ int list(const DoutPrefixProvider *dpp, int max_entries, std::optional<std::string_view> markstr,
std::vector<rgw::cls::fifo::list_entry>* out, bool* more,
librados::AioCompletion* c, optional_yield y) {
- auto r = lazy_init(y);
+ auto r = lazy_init(dpp, y);
if (r < 0) return r;
- fifo->list(max_entries, markstr, out, more, c);
+ fifo->list(dpp, max_entries, markstr, out, more, c);
return 0;
}
- int trim(std::string_view markstr, bool exclusive, optional_yield y) {
- auto r = lazy_init(y);
+ int trim(const DoutPrefixProvider *dpp, std::string_view markstr, bool exclusive, optional_yield y) {
+ auto r = lazy_init(dpp, y);
if (r < 0) return r;
- return fifo->trim(markstr, exclusive, y);
+ return fifo->trim(dpp, markstr, exclusive, y);
}
- int trim(std::string_view markstr, bool exclusive, librados::AioCompletion* c,
+ int trim(const DoutPrefixProvider *dpp, std::string_view markstr, bool exclusive, librados::AioCompletion* c,
optional_yield y) {
- auto r = lazy_init(y);
+ auto r = lazy_init(dpp, y);
if (r < 0) return r;
- fifo->trim(markstr, exclusive, c);
+ fifo->trim(dpp, markstr, exclusive, c);
return 0;
}
- int get_part_info(int64_t part_num, rados::cls::fifo::part_header* header,
+ int get_part_info(const DoutPrefixProvider *dpp, int64_t part_num, rados::cls::fifo::part_header* header,
optional_yield y) {
- auto r = lazy_init(y);
+ auto r = lazy_init(dpp, y);
if (r < 0) return r;
- return fifo->get_part_info(part_num, header, y);
+ return fifo->get_part_info(dpp, part_num, header, y);
}
- int get_part_info(int64_t part_num, rados::cls::fifo::part_header* header,
+ int get_part_info(const DoutPrefixProvider *dpp, int64_t part_num, rados::cls::fifo::part_header* header,
librados::AioCompletion* c, optional_yield y) {
- auto r = lazy_init(y);
+ auto r = lazy_init(dpp, y);
if (r < 0) return r;
fifo->get_part_info(part_num, header, c);
return 0;
}
- int get_head_info(fu2::unique_function<
+ int get_head_info(const DoutPrefixProvider *dpp, fu2::unique_function<
void(int r, rados::cls::fifo::part_header&&)>&& f,
librados::AioCompletion* c,
optional_yield y) {
- auto r = lazy_init(y);
+ auto r = lazy_init(dpp, y);
if (r < 0) return r;
- fifo->get_head_info(std::move(f), c);
+ fifo->get_head_info(dpp, std::move(f), c);
return 0;
}
};
namespace bp = boost::process;
-int add_package(rgw::sal::Store* store, optional_yield y, const std::string& package_name, bool allow_compilation) {
- // verify that luarocks can load this oackage
+int add_package(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, const std::string& package_name, bool allow_compilation) {
+ // verify that luarocks can load this package
const auto p = bp::search_path("luarocks");
if (p.empty()) {
return -ECHILD;
std::map<std::string, bufferlist> new_package{{package_name, empty_bl}};
librados::ObjectWriteOperation op;
op.omap_set(new_package);
- ret = rgw_rados_operate(*(static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_lc_pool_ctx()),
+ ret = rgw_rados_operate(dpp, *(static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_lc_pool_ctx()),
PACKAGE_LIST_OBJECT_NAME, &op, y);
if (ret < 0) {
return 0;
}
-int remove_package(rgw::sal::Store* store, optional_yield y, const std::string& package_name) {
+int remove_package(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, const std::string& package_name) {
librados::ObjectWriteOperation op;
op.omap_rm_keys(std::set<std::string>({package_name}));
- const auto ret = rgw_rados_operate(*(static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_lc_pool_ctx()),
+ const auto ret = rgw_rados_operate(dpp, *(static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_lc_pool_ctx()),
PACKAGE_LIST_OBJECT_NAME, &op, y);
if (ret < 0) {
return 0;
}
-int list_packages(rgw::sal::Store* store, optional_yield y, packages_t& packages) {
+int list_packages(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, packages_t& packages) {
constexpr auto max_chunk = 1024U;
std::string start_after;
bool more = true;
librados::ObjectReadOperation op;
packages_t packages_chunk;
op.omap_get_keys2(start_after, max_chunk, &packages_chunk, &more, &rval);
- const auto ret = rgw_rados_operate(*(static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_lc_pool_ctx()),
+ const auto ret = rgw_rados_operate(dpp, *(static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_lc_pool_ctx()),
PACKAGE_LIST_OBJECT_NAME, &op, nullptr, y);
if (ret < 0) {
return 0;
}
-int install_packages(rgw::sal::Store* store, optional_yield y, packages_t& failed_packages, std::string& output) {
+int install_packages(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, packages_t& failed_packages, std::string& output) {
// luarocks directory cleanup
boost::system::error_code ec;
const auto& luarocks_path = store->get_luarocks_path();
}
packages_t packages;
- auto ret = list_packages(store, y, packages);
+ auto ret = list_packages(dpp, store, y, packages);
if (ret == -ENOENT) {
// allowlist is empty
return 0;
using packages_t = std::set<std::string>;
// add a lua package to the allowlist
-int add_package(rgw::sal::Store* store, optional_yield y, const std::string& package_name, bool allow_compilation);
+int add_package(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, const std::string& package_name, bool allow_compilation);
// remove a lua package from the allowlist
-int remove_package(rgw::sal::Store* store, optional_yield y, const std::string& package_name);
+int remove_package(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, const std::string& package_name);
// list lua packages in the allowlist
-int list_packages(rgw::sal::Store* store, optional_yield y, packages_t& packages);
+int list_packages(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, packages_t& packages);
// install all packages from the allowlist
// return the list of packages that failed to install and the output of the install command
-int install_packages(rgw::sal::Store* store, optional_yield y, packages_t& failed_packages, std::string& output);
+int install_packages(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, packages_t& failed_packages, std::string& output);
#endif
}
const auto rc = rgw_log_op(store, rest, s, op_name, olog);
lua_pushinteger(L, rc);
} else {
- ldout(s->cct, 1) << "Lua ERROR: missing rados store, cannot use ops log" << dendl;
+ ldpp_dout(s, 1) << "Lua ERROR: missing rados store, cannot use ops log" << dendl;
lua_pushinteger(L, -EINVAL);
}
// execute the lua script
if (luaL_dostring(L, script.c_str()) != LUA_OK) {
const std::string err(lua_tostring(L, -1));
- ldout(s->cct, 1) << "Lua ERROR: " << err << dendl;
+ ldpp_dout(s, 1) << "Lua ERROR: " << err << dendl;
return -1;
}
} catch (const std::runtime_error& e) {
- ldout(s->cct, 1) << "Lua ERROR: " << e.what() << dendl;
+ ldpp_dout(s, 1) << "Lua ERROR: " << e.what() << dendl;
return -1;
}
#ifdef WITH_RADOSGW_LUA_PACKAGES
rgw::lua::packages_t failed_packages;
std::string output;
- r = rgw::lua::install_packages(store, null_yield, failed_packages, output);
+ r = rgw::lua::install_packages(&dp, store, null_yield, failed_packages, output);
if (r < 0) {
dout(1) << "ERROR: failed to install lua packages from allowlist" << dendl;
}
// add a watcher to respond to realm configuration changes
- RGWPeriodPusher pusher(store, null_yield);
+ RGWPeriodPusher pusher(&dp, store, null_yield);
RGWFrontendPauser pauser(fes, implicit_tenant_context, &pusher);
auto reloader = std::make_unique<RGWRealmReloader>(store,
service_map_meta, &pauser);
- RGWRealmWatcher realm_watcher(g_ceph_context, store->get_zone()->get_realm());
+ RGWRealmWatcher realm_watcher(&dp, g_ceph_context, store->get_zone()->get_realm());
realm_watcher.add_watcher(RGWRealmNotify::Reload, *reloader);
realm_watcher.add_watcher(RGWRealmNotify::ZonesNeedPeriod, pusher);
oid = prefix + buf;
}
- int add_entry(const string& hash_key, const string& section, const string& key, bufferlist& bl);
+ int add_entry(const DoutPrefixProvider *dpp, const string& hash_key, const string& section, const string& key, bufferlist& bl);
int get_shard_id(const string& hash_key, int *shard_id);
- int store_entries_in_shard(list<cls_log_entry>& entries, int shard_id, librados::AioCompletion *completion);
+ int store_entries_in_shard(const DoutPrefixProvider *dpp, list<cls_log_entry>& entries, int shard_id, librados::AioCompletion *completion);
struct LogListCtx {
int cur_shard;
const real_time& end_time, const string& marker,
void **handle);
void complete_list_entries(void *handle);
- int list_entries(void *handle,
+ int list_entries(const DoutPrefixProvider *dpp,
+ void *handle,
int max_entries,
list<cls_log_entry>& entries,
string *out_marker,
bool *truncated);
- int trim(int shard_id, const real_time& from_time, const real_time& end_time, const string& start_marker, const string& end_marker);
- int get_info(int shard_id, RGWMetadataLogInfo *info);
- int get_info_async(int shard_id, RGWMetadataLogInfoCompletion *completion);
- int lock_exclusive(int shard_id, timespan duration, string&zone_id, string& owner_id);
- int unlock(int shard_id, string& zone_id, string& owner_id);
+ int trim(const DoutPrefixProvider *dpp, int shard_id, const real_time& from_time, const real_time& end_time, const string& start_marker, const string& end_marker);
+ int get_info(const DoutPrefixProvider *dpp, int shard_id, RGWMetadataLogInfo *info);
+ int get_info_async(const DoutPrefixProvider *dpp, int shard_id, RGWMetadataLogInfoCompletion *completion);
+ int lock_exclusive(const DoutPrefixProvider *dpp, int shard_id, timespan duration, string&zone_id, string& owner_id);
+ int unlock(const DoutPrefixProvider *dpp, int shard_id, string& zone_id, string& owner_id);
int update_shards(list<int>& shards);
}
-int RGWMetadataLog::add_entry(const string& hash_key, const string& section, const string& key, bufferlist& bl) {
+int RGWMetadataLog::add_entry(const DoutPrefixProvider *dpp, const string& hash_key, const string& section, const string& key, bufferlist& bl) {
if (!svc.zone->need_to_log_metadata())
return 0;
rgw_shard_name(prefix, cct->_conf->rgw_md_log_max_shards, hash_key, oid, &shard_id);
mark_modified(shard_id);
real_time now = real_clock::now();
- return svc.cls->timelog.add(oid, now, section, key, bl, null_yield);
+ return svc.cls->timelog.add(dpp, oid, now, section, key, bl, null_yield);
}
int RGWMetadataLog::get_shard_id(const string& hash_key, int *shard_id)
return 0;
}
-int RGWMetadataLog::store_entries_in_shard(list<cls_log_entry>& entries, int shard_id, librados::AioCompletion *completion)
+int RGWMetadataLog::store_entries_in_shard(const DoutPrefixProvider *dpp, list<cls_log_entry>& entries, int shard_id, librados::AioCompletion *completion)
{
string oid;
mark_modified(shard_id);
rgw_shard_name(prefix, shard_id, oid);
- return svc.cls->timelog.add(oid, entries, completion, false, null_yield);
+ return svc.cls->timelog.add(dpp, oid, entries, completion, false, null_yield);
}
void RGWMetadataLog::init_list_entries(int shard_id, const real_time& from_time, const real_time& end_time,
delete ctx;
}
-int RGWMetadataLog::list_entries(void *handle,
+int RGWMetadataLog::list_entries(const DoutPrefixProvider *dpp, void *handle,
int max_entries,
list<cls_log_entry>& entries,
string *last_marker,
}
std::string next_marker;
- int ret = svc.cls->timelog.list(ctx->cur_oid, ctx->from_time, ctx->end_time,
+ int ret = svc.cls->timelog.list(dpp, ctx->cur_oid, ctx->from_time, ctx->end_time,
max_entries, entries, ctx->marker,
&next_marker, truncated, null_yield);
if ((ret < 0) && (ret != -ENOENT))
return 0;
}
-int RGWMetadataLog::get_info(int shard_id, RGWMetadataLogInfo *info)
+int RGWMetadataLog::get_info(const DoutPrefixProvider *dpp, int shard_id, RGWMetadataLogInfo *info)
{
string oid;
get_shard_oid(shard_id, oid);
cls_log_header header;
- int ret = svc.cls->timelog.info(oid, &header, null_yield);
+ int ret = svc.cls->timelog.info(dpp, oid, &header, null_yield);
if ((ret < 0) && (ret != -ENOENT))
return ret;
completion->release();
}
-int RGWMetadataLog::get_info_async(int shard_id, RGWMetadataLogInfoCompletion *completion)
+int RGWMetadataLog::get_info_async(const DoutPrefixProvider *dpp, int shard_id, RGWMetadataLogInfoCompletion *completion)
{
string oid;
get_shard_oid(shard_id, oid);
completion->get(); // hold a ref until the completion fires
- return svc.cls->timelog.info_async(completion->get_io_obj(), oid,
+ return svc.cls->timelog.info_async(dpp, completion->get_io_obj(), oid,
&completion->get_header(),
completion->get_completion());
}
-int RGWMetadataLog::trim(int shard_id, const real_time& from_time, const real_time& end_time,
+int RGWMetadataLog::trim(const DoutPrefixProvider *dpp, int shard_id, const real_time& from_time, const real_time& end_time,
const string& start_marker, const string& end_marker)
{
string oid;
get_shard_oid(shard_id, oid);
- return svc.cls->timelog.trim(oid, from_time, end_time, start_marker,
+ return svc.cls->timelog.trim(dpp, oid, from_time, end_time, start_marker,
end_marker, nullptr, null_yield);
}
-int RGWMetadataLog::lock_exclusive(int shard_id, timespan duration, string& zone_id, string& owner_id) {
+int RGWMetadataLog::lock_exclusive(const DoutPrefixProvider *dpp, int shard_id, timespan duration, string& zone_id, string& owner_id) {
string oid;
get_shard_oid(shard_id, oid);
- return svc.cls->lock.lock_exclusive(svc.zone->get_zone_params().log_pool, oid, duration, zone_id, owner_id);
+ return svc.cls->lock.lock_exclusive(dpp, svc.zone->get_zone_params().log_pool, oid, duration, zone_id, owner_id);
}
-int RGWMetadataLog::unlock(int shard_id, string& zone_id, string& owner_id) {
+int RGWMetadataLog::unlock(const DoutPrefixProvider *dpp, int shard_id, string& zone_id, string& owner_id) {
string oid;
get_shard_oid(shard_id, oid);
- return svc.cls->lock.unlock(svc.zone->get_zone_params().log_pool, oid, zone_id, owner_id);
+ return svc.cls->lock.unlock(dpp, svc.zone->get_zone_params().log_pool, oid, zone_id, owner_id);
}
void RGWMetadataLog::mark_modified(int shard_id)
return -ENOTSUP;
}
- int list_keys_init(const string& marker, void **phandle) override {
+ int list_keys_init(const DoutPrefixProvider *dpp, const string& marker, void **phandle) override {
iter_data *data = new iter_data;
list<string> sections;
mgr->get_sections(sections);
int RGWMetadataHandlerPut_SObj::put(const DoutPrefixProvider *dpp)
{
- int ret = put_check();
+ int ret = put_check(dpp);
if (ret != 0) {
return ret;
}
});
}
-int RGWMetadataHandler_GenericMetaBE::list_keys_init(const string& marker, void **phandle)
+int RGWMetadataHandler_GenericMetaBE::list_keys_init(const DoutPrefixProvider *dpp, const string& marker, void **phandle)
{
auto op = std::make_unique<RGWSI_MetaBackend_Handler::Op_ManagedCtx>(be_handler);
- int ret = op->list_init(marker);
+ int ret = op->list_init(dpp, marker);
if (ret < 0) {
return ret;
}
RGWMetadataHandler *handler;
};
-int RGWMetadataManager::list_keys_init(const string& section, void **handle)
+int RGWMetadataManager::list_keys_init(const DoutPrefixProvider *dpp, const string& section, void **handle)
{
- return list_keys_init(section, string(), handle);
+ return list_keys_init(dpp, section, string(), handle);
}
-int RGWMetadataManager::list_keys_init(const string& section,
+int RGWMetadataManager::list_keys_init(const DoutPrefixProvider *dpp, const string& section,
const string& marker, void **handle)
{
string entry;
list_keys_handle *h = new list_keys_handle;
h->handler = handler;
- ret = handler->list_keys_init(marker, &h->handle);
+ ret = handler->list_keys_init(dpp, marker, &h->handle);
if (ret < 0) {
delete h;
return ret;
RGWMDLogStatus op_type,
std::function<int()> f) = 0;
- virtual int list_keys_init(const string& marker, void **phandle) = 0;
+ virtual int list_keys_init(const DoutPrefixProvider *dpp, const string& marker, void **phandle) = 0;
virtual int list_keys_next(void *handle, int max, list<string>& keys, bool *truncated) = 0;
virtual void list_keys_complete(void *handle) = 0;
int get_shard_id(const string& entry, int *shard_id) override;
- int list_keys_init(const std::string& marker, void **phandle) override;
+ int list_keys_init(const DoutPrefixProvider *dpp, const std::string& marker, void **phandle) override;
int list_keys_next(void *handle, int max, std::list<string>& keys, bool *truncated) override;
void list_keys_complete(void *handle) override;
RGWMDLogStatus op_type,
std::function<int()> f);
- int list_keys_init(const string& section, void **phandle);
- int list_keys_init(const string& section, const string& marker, void **phandle);
+ int list_keys_init(const DoutPrefixProvider *dpp, const string& section, void **phandle);
+ int list_keys_init(const DoutPrefixProvider *dpp, const string& section, const string& marker, void **phandle);
int list_keys_next(void *handle, int max, list<string>& keys, bool *truncated);
void list_keys_complete(void *handle);
int put_pre(const DoutPrefixProvider *dpp) override;
int put(const DoutPrefixProvider *dpp) override;
- virtual int put_check() {
+ virtual int put_check(const DoutPrefixProvider *dpp) {
return 0;
}
virtual int put_checked(const DoutPrefixProvider *dpp);
(strncmp(uid, MULTIPART_UPLOAD_ID_PREFIX_LEGACY, sizeof(MULTIPART_UPLOAD_ID_PREFIX_LEGACY) - 1) == 0);
}
-int list_multipart_parts(rgw::sal::Bucket* bucket,
+int list_multipart_parts(const DoutPrefixProvider *dpp, rgw::sal::Bucket* bucket,
CephContext *cct,
const string& upload_id,
const string& meta_oid, int num_parts,
snprintf(buf, sizeof(buf), "%08d", marker);
p.append(buf);
- ret = obj->omap_get_vals(p, num_parts + 1, &parts_map,
+ ret = obj->omap_get_vals(dpp, p, num_parts + 1, &parts_map,
nullptr, null_yield);
} else {
- ret = obj->omap_get_all(&parts_map, null_yield);
+ ret = obj->omap_get_all(dpp, &parts_map, null_yield);
}
if (ret < 0) {
return ret;
try {
decode(info, bli);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: could not part info, caught buffer::error" <<
+ ldpp_dout(dpp, 0) << "ERROR: could not part info, caught buffer::error" <<
dendl;
return -EIO;
}
* where one gateway doesn't support correctly sorted omap
* keys for multipart upload just assume data is unsorted.
*/
- return list_multipart_parts(bucket, cct, upload_id,
+ return list_multipart_parts(dpp, bucket, cct, upload_id,
meta_oid, num_parts, marker, parts,
next_marker, truncated, true);
}
return 0;
}
-int list_multipart_parts(struct req_state *s,
+int list_multipart_parts(const DoutPrefixProvider *dpp, struct req_state *s,
const string& upload_id,
const string& meta_oid, int num_parts,
int marker, map<uint32_t, RGWUploadPartInfo>& parts,
int *next_marker, bool *truncated,
bool assume_unsorted)
{
- return list_multipart_parts(s->bucket.get(), s->cct, upload_id,
+ return list_multipart_parts(dpp, s->bucket.get(), s->cct, upload_id,
meta_oid, num_parts, marker, parts,
next_marker, truncated, assume_unsorted);
}
uint64_t parts_accounted_size = 0;
do {
- ret = list_multipart_parts(bucket, cct,
+ ret = list_multipart_parts(dpp, bucket, cct,
mp_obj.get_upload_id(), mp_obj.get_meta(),
1000, marker, obj_parts, &marker, &truncated);
if (ret < 0) {
if (ret < 0 && ret != -ENOENT)
return ret;
} else {
- chain->update(&obj_part.manifest);
- RGWObjManifest::obj_iterator oiter = obj_part.manifest.obj_begin();
- if (oiter != obj_part.manifest.obj_end()) {
+ chain->update(dpp, &obj_part.manifest);
+ RGWObjManifest::obj_iterator oiter = obj_part.manifest.obj_begin(dpp);
+ if (oiter != obj_part.manifest.obj_end(dpp)) {
std::unique_ptr<rgw::sal::Object> head = bucket->get_object(rgw_obj_key());
rgw_raw_obj raw_head = oiter.get_location().get_raw_obj(store);
head->raw_obj_to_obj(raw_head);
return -ERR_NO_SUCH_UPLOAD;
}
//Delete objects inline if send chain to gc fails
- chain->delete_inline(mp_obj.get_upload_id());
+ chain->delete_inline(dpp, mp_obj.get_upload_id());
}
std::unique_ptr<rgw::sal::Object::DeleteOp> del_op = meta_obj->get_delete_op(obj_ctx);
#include "rgw_xml.h"
#include "rgw_obj_manifest.h"
#include "rgw_compression_types.h"
+#include "common/dout.h"
namespace rgw { namespace sal {
class Store;
extern bool is_v2_upload_id(const string& upload_id);
-extern int list_multipart_parts(rgw::sal::Bucket* bucket,
+extern int list_multipart_parts(const DoutPrefixProvider *dpp,
+ rgw::sal::Bucket* bucket,
CephContext *cct,
const string& upload_id,
const string& meta_oid, int num_parts,
int *next_marker, bool *truncated,
bool assume_unsorted = false);
-extern int list_multipart_parts(struct req_state *s,
+extern int list_multipart_parts(const DoutPrefixProvider *dpp,
+ struct req_state *s,
const string& upload_id,
const string& meta_oid, int num_parts,
int marker, map<uint32_t, RGWUploadPartInfo>& parts,
librados::ObjectReadOperation op;
queues_t queues_chunk;
op.omap_get_keys2(start_after, max_chunk, &queues_chunk, &more, &rval);
- const auto ret = rgw_rados_operate(rados_ioctx, Q_LIST_OBJECT_NAME, &op, nullptr, y);
+ const auto ret = rgw_rados_operate(this, rados_ioctx, Q_LIST_OBJECT_NAME, &op, nullptr, y);
if (ret == -ENOENT) {
// queue list object was not created - nothing to do
return 0;
"" /*no tag*/);
cls_2pc_queue_expire_reservations(op, stale_time);
// check ownership and do reservation cleanup in one batch
- auto ret = rgw_rados_operate(rados_ioctx, queue_name, &op, optional_yield(io_context, yield));
+ auto ret = rgw_rados_operate(this, rados_ioctx, queue_name, &op, optional_yield(io_context, yield));
if (ret == -ENOENT) {
// queue was deleted
ldpp_dout(this, 5) << "INFO: queue: "
"" /*no tag*/);
cls_2pc_queue_list_entries(op, start_marker, max_elements, &obl, &rval);
// check ownership and list entries in one batch
- auto ret = rgw_rados_operate(rados_ioctx, queue_name, &op, nullptr, optional_yield(io_context, yield));
+ auto ret = rgw_rados_operate(this, rados_ioctx, queue_name, &op, nullptr, optional_yield(io_context, yield));
if (ret == -ENOENT) {
// queue was deleted
ldpp_dout(this, 5) << "INFO: queue: "
"" /*no tag*/);
cls_2pc_queue_remove_entries(op, end_marker);
// check ownership and deleted entries in one batch
- const auto ret = rgw_rados_operate(rados_ioctx, queue_name, &op, optional_yield(io_context, yield));
+ const auto ret = rgw_rados_operate(this, rados_ioctx, queue_name, &op, optional_yield(io_context, yield));
if (ret == -ENOENT) {
// queue was deleted
ldpp_dout(this, 5) << "INFO: queue: "
failover_time,
LOCK_FLAG_MAY_RENEW);
- ret = rgw_rados_operate(rados_ioctx, queue_name, &op, optional_yield(io_context, yield));
+ ret = rgw_rados_operate(this, rados_ioctx, queue_name, &op, optional_yield(io_context, yield));
if (ret == -EBUSY) {
// lock is already taken by another RGW
ldpp_dout(this, 20) << "INFO: queue: " << queue_name << " owned (locked) by another daemon" << dendl;
librados::ObjectWriteOperation op;
op.create(true);
cls_2pc_queue_init(op, topic_name, max_queue_size);
- auto ret = rgw_rados_operate(rados_ioctx, topic_name, &op, y);
+ auto ret = rgw_rados_operate(this, rados_ioctx, topic_name, &op, y);
if (ret == -EEXIST) {
// queue already exists - nothing to do
ldpp_dout(this, 20) << "INFO: queue for topic: " << topic_name << " already exists. nothing to do" << dendl;
bufferlist empty_bl;
std::map<std::string, bufferlist> new_topic{{topic_name, empty_bl}};
op.omap_set(new_topic);
- ret = rgw_rados_operate(rados_ioctx, Q_LIST_OBJECT_NAME, &op, y);
+ ret = rgw_rados_operate(this, rados_ioctx, Q_LIST_OBJECT_NAME, &op, y);
if (ret < 0) {
ldpp_dout(this, 1) << "ERROR: failed to add queue: " << topic_name << " to queue list. error: " << ret << dendl;
return ret;
int remove_persistent_topic(const std::string& topic_name, optional_yield y) {
librados::ObjectWriteOperation op;
op.remove();
- auto ret = rgw_rados_operate(rados_ioctx, topic_name, &op, y);
+ auto ret = rgw_rados_operate(this, rados_ioctx, topic_name, &op, y);
if (ret == -ENOENT) {
// queue already removed - nothing to do
ldpp_dout(this, 20) << "INFO: queue for topic: " << topic_name << " already removed. nothing to do" << dendl;
std::set<std::string> topic_to_remove{{topic_name}};
op.omap_rm_keys(topic_to_remove);
- ret = rgw_rados_operate(rados_ioctx, Q_LIST_OBJECT_NAME, &op, y);
+ ret = rgw_rados_operate(this, rados_ioctx, Q_LIST_OBJECT_NAME, &op, y);
if (ret < 0) {
ldpp_dout(this, 1) << "ERROR: failed to remove queue: " << topic_name << " from queue list. error: " << ret << dendl;
return ret;
return true;
}
-int publish_reserve(EventType event_type,
+int publish_reserve(const DoutPrefixProvider *dpp, EventType event_type,
reservation_t& res,
const RGWObjTags* req_tags)
{
// notification does not apply to req_state
continue;
}
- ldout(res.s->cct, 20) << "INFO: notification: '" << topic_filter.s3_id <<
+ ldpp_dout(dpp, 20) << "INFO: notification: '" << topic_filter.s3_id <<
"' on topic: '" << topic_cfg.dest.arn_topic <<
"' and bucket: '" << res.s->bucket->get_name() <<
"' (unique topic: '" << topic_cfg.name <<
int rval;
const auto& queue_name = topic_cfg.dest.arn_topic;
cls_2pc_queue_reserve(op, res.size, 1, &obl, &rval);
- auto ret = rgw_rados_operate(res.store->getRados()->get_notif_pool_ctx(),
+ auto ret = rgw_rados_operate(dpp, res.store->getRados()->get_notif_pool_ctx(),
queue_name, &op, res.s->yield, librados::OPERATION_RETURNVEC);
if (ret < 0) {
- ldout(res.s->cct, 1) << "ERROR: failed to reserve notification on queue: " << queue_name
+ ldpp_dout(dpp, 1) << "ERROR: failed to reserve notification on queue: " << queue_name
<< ". error: " << ret << dendl;
// if no space is left in queue we ask client to slow down
return (ret == -ENOSPC) ? -ERR_RATE_LIMITED : ret;
}
ret = cls_2pc_queue_reserve_result(obl, res_id);
if (ret < 0) {
- ldout(res.s->cct, 1) << "ERROR: failed to parse reservation id. error: " << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to parse reservation id. error: " << ret << dendl;
return ret;
}
}
// first cancel the existing reservation
librados::ObjectWriteOperation op;
cls_2pc_queue_abort(op, topic.res_id);
- auto ret = rgw_rados_operate(res.store->getRados()->get_notif_pool_ctx(),
+ auto ret = rgw_rados_operate(dpp, res.store->getRados()->get_notif_pool_ctx(),
topic.cfg.dest.arn_topic, &op,
res.s->yield);
if (ret < 0) {
bufferlist obl;
int rval;
cls_2pc_queue_reserve(op, bl.length(), 1, &obl, &rval);
- ret = rgw_rados_operate(res.store->getRados()->get_notif_pool_ctx(),
+ ret = rgw_rados_operate(dpp, res.store->getRados()->get_notif_pool_ctx(),
queue_name, &op, res.s->yield, librados::OPERATION_RETURNVEC);
if (ret < 0) {
ldpp_dout(dpp, 1) << "ERROR: failed to reserve extra space on queue: " << queue_name
std::vector<bufferlist> bl_data_vec{std::move(bl)};
librados::ObjectWriteOperation op;
cls_2pc_queue_commit(op, bl_data_vec, topic.res_id);
- const auto ret = rgw_rados_operate(res.store->getRados()->get_notif_pool_ctx(),
+ const auto ret = rgw_rados_operate(dpp, res.store->getRados()->get_notif_pool_ctx(),
queue_name, &op,
res.s->yield);
topic.res_id = cls_2pc_reservation::NO_ID;
return 0;
}
-int publish_abort(reservation_t& res) {
+int publish_abort(const DoutPrefixProvider *dpp, reservation_t& res) {
for (auto& topic : res.topics) {
if (!topic.cfg.dest.persistent || topic.res_id == cls_2pc_reservation::NO_ID) {
// nothing to abort or already committed/aborted
const auto& queue_name = topic.cfg.dest.arn_topic;
librados::ObjectWriteOperation op;
cls_2pc_queue_abort(op, topic.res_id);
- const auto ret = rgw_rados_operate(res.store->getRados()->get_notif_pool_ctx(),
+ const auto ret = rgw_rados_operate(dpp, res.store->getRados()->get_notif_pool_ctx(),
queue_name, &op,
res.s->yield);
if (ret < 0) {
- ldout(res.s->cct, 1) << "ERROR: failed to abort reservation: " << topic.res_id <<
+ ldpp_dout(dpp, 1) << "ERROR: failed to abort reservation: " << topic.res_id <<
" from queue: " << queue_name << ". error: " << ret << dendl;
return ret;
}
}
reservation_t::~reservation_t() {
- publish_abort(*this);
+ publish_abort(dpp, *this);
}
}
cls_2pc_reservation::id_t res_id;
};
+ const DoutPrefixProvider *dpp;
std::vector<topic_t> topics;
rgw::sal::RadosStore* const store;
const req_state* const s;
size_t size;
rgw::sal::Object* const object;
- reservation_t(rgw::sal::RadosStore* _store, const req_state* _s, rgw::sal::Object* _object) :
- store(_store), s(_s), object(_object) {}
+ reservation_t(const DoutPrefixProvider *_dpp, rgw::sal::RadosStore* _store, const req_state* _s, rgw::sal::Object* _object) :
+ dpp(_dpp), store(_store), s(_s), object(_object) {}
// dtor doing resource leak guarding
// aborting the reservation if not already committed or aborted
};
// create a reservation on the 2-phase-commit queue
-int publish_reserve(EventType event_type,
+int publish_reserve(const DoutPrefixProvider *dpp,
+ EventType event_type,
reservation_t& reservation,
const RGWObjTags* req_tags);
const DoutPrefixProvider *dpp);
// cancel the reservation
-int publish_abort(reservation_t& reservation);
+int publish_abort(const DoutPrefixProvider *dpp, reservation_t& reservation);
}
#include "services/svc_zone.h"
#include "services/svc_tier_rados.h"
+#include "rgw_rados.h" // RGW_OBJ_NS_SHADOW and RGW_OBJ_NS_MULTIPART
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rgw
manifest->get_implicit_location(cur_part_id, cur_stripe, ofs, NULL, &cur_obj);
- manifest->update_iterators();
-
return 0;
}
-const RGWObjManifest::obj_iterator& RGWObjManifest::obj_begin()
-{
- return begin_iter;
-}
-
-const RGWObjManifest::obj_iterator& RGWObjManifest::obj_end()
-{
- return end_iter;
-}
-
-RGWObjManifest::obj_iterator RGWObjManifest::obj_find(uint64_t ofs)
-{
- if (ofs > obj_size) {
- ofs = obj_size;
- }
- RGWObjManifest::obj_iterator iter(this);
- iter.seek(ofs);
- return iter;
-}
-
-int RGWObjManifest::append(RGWObjManifest& m, const RGWZoneGroup& zonegroup,
+int RGWObjManifest::append(const DoutPrefixProvider *dpp, RGWObjManifest& m, const RGWZoneGroup& zonegroup,
const RGWZoneParams& zone_params)
{
if (explicit_objs || m.explicit_objs) {
- return append_explicit(m, zonegroup, zone_params);
+ return append_explicit(dpp, m, zonegroup, zone_params);
}
if (rules.empty()) {
map<uint64_t, RGWObjManifestRule>::iterator miter = m.rules.begin();
if (miter == m.rules.end()) {
- return append_explicit(m, zonegroup, zone_params);
+ return append_explicit(dpp, m, zonegroup, zone_params);
}
for (; miter != m.rules.end(); ++miter) {
return 0;
}
-int RGWObjManifest::append(RGWObjManifest& m, rgw::sal::Zone* zone_svc)
+int RGWObjManifest::append(const DoutPrefixProvider *dpp, RGWObjManifest& m, rgw::sal::Zone* zone_svc)
{
- return append(m, zone_svc->get_zonegroup(), zone_svc->get_params());
+ return append(dpp, m, zone_svc->get_zonegroup(), zone_svc->get_params());
}
void RGWObjManifest::append_rules(RGWObjManifest& m, map<uint64_t, RGWObjManifestRule>::iterator& miter,
}
}
-void RGWObjManifest::convert_to_explicit(const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params)
+void RGWObjManifest::convert_to_explicit(const DoutPrefixProvider *dpp, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params)
{
if (explicit_objs) {
return;
}
- obj_iterator iter = obj_begin();
+ obj_iterator iter = obj_begin(dpp);
- while (iter != obj_end()) {
+ while (iter != obj_end(dpp)) {
RGWObjManifestPart& part = objs[iter.get_stripe_ofs()];
const rgw_obj_select& os = iter.get_location();
const rgw_raw_obj& raw_loc = os.get_raw_obj(zonegroup, zone_params);
prefix.clear();
}
-int RGWObjManifest::append_explicit(RGWObjManifest& m, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params)
+int RGWObjManifest::append_explicit(const DoutPrefixProvider *dpp, RGWObjManifest& m, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params)
{
if (!explicit_objs) {
- convert_to_explicit(zonegroup, zone_params);
+ convert_to_explicit(dpp, zonegroup, zone_params);
}
if (!m.explicit_objs) {
- m.convert_to_explicit(zonegroup, zone_params);
+ m.convert_to_explicit(dpp, zonegroup, zone_params);
}
map<uint64_t, RGWObjManifestPart>::iterator iter;
uint64_t base = obj_size;
/* are we still pointing at the head? */
if (ofs < head_size) {
rule_iter = manifest->rules.begin();
- RGWObjManifestRule *rule = &rule_iter->second;
+ const RGWObjManifestRule *rule = &rule_iter->second;
ofs = std::min(head_size, obj_size);
stripe_ofs = ofs;
cur_stripe = 1;
return;
}
- RGWObjManifestRule *rule = &rule_iter->second;
+ const RGWObjManifestRule *rule = &rule_iter->second;
stripe_ofs += rule->stripe_max_size;
cur_stripe++;
- dout(20) << "RGWObjManifest::operator++(): rule->part_size=" << rule->part_size << " rules.size()=" << manifest->rules.size() << dendl;
+ ldpp_dout(dpp, 20) << "RGWObjManifest::operator++(): rule->part_size=" << rule->part_size << " rules.size()=" << manifest->rules.size() << dendl;
if (rule->part_size > 0) {
/* multi part, multi stripes object */
- dout(20) << "RGWObjManifest::operator++(): stripe_ofs=" << stripe_ofs << " part_ofs=" << part_ofs << " rule->part_size=" << rule->part_size << dendl;
+ ldpp_dout(dpp, 20) << "RGWObjManifest::operator++(): stripe_ofs=" << stripe_ofs << " part_ofs=" << part_ofs << " rule->part_size=" << rule->part_size << dendl;
if (stripe_ofs >= part_ofs + rule->part_size) {
/* moved to the next part */
stripe_size = 0;
}
- dout(20) << "RGWObjManifest::operator++(): result: ofs=" << ofs << " stripe_ofs=" << stripe_ofs << " part_ofs=" << part_ofs << " rule->part_size=" << rule->part_size << dendl;
+ ldpp_dout(dpp, 20) << "RGWObjManifest::operator++(): result: ofs=" << ofs << " stripe_ofs=" << stripe_ofs << " part_ofs=" << part_ofs << " rule->part_size=" << rule->part_size << dendl;
update_location();
}
// Normal object which not generated through copy operation
manifest->set_tail_instance(_obj.key.instance);
- manifest->update_iterators();
-
return 0;
}
+// Position this iterator at logical byte offset 'o' within the object,
+// recomputing the explicit-part iterator (explicit manifests) or the
+// part/stripe indices (rule-based manifests), then refreshing 'location'.
+void RGWObjManifest::obj_iterator::seek(uint64_t o)
+{
+ ofs = o;
+ if (manifest->explicit_objs) {
+ // Explicit manifest: pick the last part whose start offset is <= ofs.
+ explicit_iter = manifest->objs.upper_bound(ofs);
+ if (explicit_iter != manifest->objs.begin()) {
+ --explicit_iter;
+ }
+ if (ofs < manifest->obj_size) {
+ update_explicit_pos();
+ } else {
+ // Seeking at/past the end clamps to the end() position.
+ ofs = manifest->obj_size;
+ }
+ update_location();
+ return;
+ }
+ if (o < manifest->get_head_size()) {
+ // Still inside the head object: a single stripe spanning the head.
+ rule_iter = manifest->rules.begin();
+ stripe_ofs = 0;
+ stripe_size = manifest->get_head_size();
+ if (rule_iter != manifest->rules.end()) {
+ cur_part_id = rule_iter->second.start_part_num;
+ cur_override_prefix = rule_iter->second.override_prefix;
+ }
+ update_location();
+ return;
+ }
+
+ // Tail: find the rule covering ofs (last rule with start <= ofs).
+ rule_iter = manifest->rules.upper_bound(ofs);
+ next_rule_iter = rule_iter;
+ if (rule_iter != manifest->rules.begin()) {
+ --rule_iter;
+ }
+
+ if (rule_iter == manifest->rules.end()) {
+ // No rules at all; nothing to index into.
+ update_location();
+ return;
+ }
+
+ const RGWObjManifestRule& rule = rule_iter->second;
+
+ if (rule.part_size > 0) {
+ // Multipart rule: derive the part id from the offset within the rule.
+ cur_part_id = rule.start_part_num + (ofs - rule.start_ofs) / rule.part_size;
+ } else {
+ cur_part_id = rule.start_part_num;
+ }
+ part_ofs = rule.start_ofs + (cur_part_id - rule.start_part_num) * rule.part_size;
+
+ if (rule.stripe_max_size > 0) {
+ cur_stripe = (ofs - part_ofs) / rule.stripe_max_size;
+
+ stripe_ofs = part_ofs + cur_stripe * rule.stripe_max_size;
+ if (!cur_part_id && manifest->get_head_size() > 0) {
+ // Part 0's stripe numbering starts after the head stripe.
+ cur_stripe++;
+ }
+ } else {
+ cur_stripe = 0;
+ stripe_ofs = part_ofs;
+ }
+
+ if (!rule.part_size) {
+ // Striped-only object: last stripe may be short of stripe_max_size.
+ stripe_size = rule.stripe_max_size;
+ stripe_size = std::min(manifest->get_obj_size() - stripe_ofs, stripe_size);
+ } else {
+ // Stripe ends at whichever comes first: stripe boundary or part end.
+ uint64_t next = std::min(stripe_ofs + rule.stripe_max_size, part_ofs + rule.part_size);
+ stripe_size = next - stripe_ofs;
+ }
+
+ cur_override_prefix = rule.override_prefix;
+
+ update_location();
+}
+
+// Refresh 'location' (the raw-object selector) from the iterator's current
+// position: explicit part, head object, or rule-derived implicit location.
+void RGWObjManifest::obj_iterator::update_location()
+{
+ if (manifest->explicit_objs) {
+ if (manifest->empty()) {
+ // Empty manifest: no part to point at; reset to a default selector.
+ location = rgw_obj_select{};
+ } else {
+ location = explicit_iter->second.loc;
+ }
+ return;
+ }
+
+ if (ofs < manifest->get_head_size()) {
+ // Within the head: the head object itself, with the head placement rule.
+ location = manifest->get_obj();
+ location.set_placement_rule(manifest->get_head_placement_rule());
+ return;
+ }
+
+ // Tail stripe: compute the shadow/multipart object name from the indices.
+ manifest->get_implicit_location(cur_part_id, cur_stripe, ofs, &cur_override_prefix, &location);
+}
+
+// Snap ofs/stripe_ofs to the start of the current explicit part and compute
+// its stripe_size from the distance to the next part (or to end of object).
+void RGWObjManifest::obj_iterator::update_explicit_pos()
+{
+ ofs = explicit_iter->first;
+ stripe_ofs = ofs;
+
+ auto next_iter = explicit_iter;
+ ++next_iter;
+ if (next_iter != manifest->objs.end()) {
+ stripe_size = next_iter->first - ofs;
+ } else {
+ // Last part: it extends to the end of the object.
+ stripe_size = manifest->obj_size - ofs;
+ }
+}
+
+// Compute the raw-object location for a (part, stripe) position in a
+// rule-based manifest: head object for offset 0 of part 0, otherwise a
+// shadow/multipart tail object named "<prefix>[.part][_stripe]".
+void RGWObjManifest::get_implicit_location(uint64_t cur_part_id, uint64_t cur_stripe,
+ uint64_t ofs, string *override_prefix, rgw_obj_select *location) const
+{
+ rgw_obj loc;
+
+ string& oid = loc.key.name;
+ string& ns = loc.key.ns;
+
+ // Tail objects share the manifest prefix unless a rule overrides it.
+ if (!override_prefix || override_prefix->empty()) {
+ oid = prefix;
+ } else {
+ oid = *override_prefix;
+ }
+
+ if (!cur_part_id) {
+ if (ofs < max_head_size) {
+ // Part 0, inside the head: the head object itself.
+ // NOTE(review): set_placement_rule() precedes the assignment below —
+ // presumably rgw_obj_select's operator=(rgw_obj) preserves the
+ // placement rule; confirm before reordering.
+ location->set_placement_rule(head_placement_rule);
+ *location = obj;
+ return;
+ } else {
+ // Part 0 tail stripe: "<prefix><stripe>" in the shadow namespace.
+ char buf[16];
+ snprintf(buf, sizeof(buf), "%d", (int)cur_stripe);
+ oid += buf;
+ ns = RGW_OBJ_NS_SHADOW;
+ }
+ } else {
+ char buf[32];
+ if (cur_stripe == 0) {
+ // First stripe of a multipart part: "<prefix>.<part>".
+ snprintf(buf, sizeof(buf), ".%d", (int)cur_part_id);
+ oid += buf;
+ ns= RGW_OBJ_NS_MULTIPART;
+ } else {
+ // Subsequent stripes: "<prefix>.<part>_<stripe>" in the shadow ns.
+ snprintf(buf, sizeof(buf), ".%d_%d", (int)cur_part_id, (int)cur_stripe);
+ oid += buf;
+ ns = RGW_OBJ_NS_SHADOW;
+ }
+ }
+
+ // Tail may live in a different bucket (e.g. after a copy); fall back to
+ // the head's bucket otherwise.
+ if (!tail_placement.bucket.name.empty()) {
+ loc.bucket = tail_placement.bucket;
+ } else {
+ loc.bucket = obj.bucket;
+ }
+
+ // Always overwrite instance with tail_instance
+ // to get the right shadow object location
+ loc.key.set_instance(tail_instance);
+
+ location->set_placement_rule(tail_placement.placement_rule);
+ *location = loc;
+}
string tail_instance; /* tail object's instance */
- void convert_to_explicit(const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params);
- int append_explicit(RGWObjManifest& m, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params);
+ void convert_to_explicit(const DoutPrefixProvider *dpp, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params);
+ int append_explicit(const DoutPrefixProvider *dpp, RGWObjManifest& m, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params);
void append_rules(RGWObjManifest& m, map<uint64_t, RGWObjManifestRule>::iterator& iter, string *override_prefix);
- void update_iterators() {
- begin_iter.seek(0);
- end_iter.seek(obj_size);
- }
public:
- RGWObjManifest() : begin_iter(this), end_iter(this) {}
+ RGWObjManifest() = default;
RGWObjManifest(const RGWObjManifest& rhs) {
*this = rhs;
}
tail_placement = rhs.tail_placement;
rules = rhs.rules;
tail_instance = rhs.tail_instance;
-
- begin_iter.set_manifest(this);
- end_iter.set_manifest(this);
-
- begin_iter.seek(rhs.begin_iter.get_ofs());
- end_iter.seek(rhs.end_iter.get_ofs());
-
return *this;
}
set_obj_size(_size);
}
- void get_implicit_location(uint64_t cur_part_id, uint64_t cur_stripe, uint64_t ofs, string *override_prefix, rgw_obj_select *location);
+ void get_implicit_location(uint64_t cur_part_id, uint64_t cur_stripe, uint64_t ofs,
+ string *override_prefix, rgw_obj_select *location) const;
void set_trivial_rule(uint64_t tail_ofs, uint64_t stripe_max_size) {
RGWObjManifestRule rule(0, tail_ofs, 0, stripe_max_size);
decode(tail_placement.placement_rule, bl);
}
- update_iterators();
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
static void generate_test_instances(list<RGWObjManifest*>& o);
- int append(RGWObjManifest& m, const RGWZoneGroup& zonegroup,
+ int append(const DoutPrefixProvider *dpp, RGWObjManifest& m, const RGWZoneGroup& zonegroup,
const RGWZoneParams& zone_params);
- int append(RGWObjManifest& m, rgw::sal::Zone* zone);
+ int append(const DoutPrefixProvider *dpp, RGWObjManifest& m, rgw::sal::Zone* zone);
bool get_rule(uint64_t ofs, RGWObjManifestRule *rule);
- bool empty() {
+ bool empty() const {
if (explicit_objs)
return objs.empty();
return rules.empty();
}
- bool has_explicit_objs() {
+ bool has_explicit_objs() const {
return explicit_objs;
}
- bool has_tail() {
+ bool has_tail() const {
if (explicit_objs) {
if (objs.size() == 1) {
- map<uint64_t, RGWObjManifestPart>::iterator iter = objs.begin();
- rgw_obj& o = iter->second.loc;
+ auto iter = objs.begin();
+ const rgw_obj& o = iter->second.loc;
return !(obj == o);
}
return (objs.size() >= 2);
}
}
- const rgw_obj& get_obj() {
+ const rgw_obj& get_obj() const {
return obj;
}
tail_placement.bucket = _b;
}
- const rgw_bucket_placement& get_tail_placement() {
+ const rgw_bucket_placement& get_tail_placement() const {
return tail_placement;
}
- const rgw_placement_rule& get_head_placement_rule() {
+ const rgw_placement_rule& get_head_placement_rule() const {
return head_placement_rule;
}
prefix = _p;
}
- const string& get_prefix() {
+ const string& get_prefix() const {
return prefix;
}
tail_instance = _ti;
}
- const string& get_tail_instance() {
+ const string& get_tail_instance() const {
return tail_instance;
}
void set_obj_size(uint64_t s) {
obj_size = s;
-
- update_iterators();
}
- uint64_t get_obj_size() {
+ uint64_t get_obj_size() const {
return obj_size;
}
- uint64_t get_head_size() {
+ uint64_t get_head_size() const {
return head_size;
}
- uint64_t get_max_head_size() {
+ uint64_t get_max_head_size() const {
return max_head_size;
}
class obj_iterator {
- RGWObjManifest *manifest = nullptr;
+ const DoutPrefixProvider *dpp;
+ const RGWObjManifest *manifest = nullptr;
uint64_t part_ofs = 0; /* where current part starts */
uint64_t stripe_ofs = 0; /* where current stripe starts */
uint64_t ofs = 0; /* current position within the object */
rgw_obj_select location;
- map<uint64_t, RGWObjManifestRule>::iterator rule_iter;
- map<uint64_t, RGWObjManifestRule>::iterator next_rule_iter;
-
- map<uint64_t, RGWObjManifestPart>::iterator explicit_iter;
+ map<uint64_t, RGWObjManifestRule>::const_iterator rule_iter;
+ map<uint64_t, RGWObjManifestRule>::const_iterator next_rule_iter;
+ map<uint64_t, RGWObjManifestPart>::const_iterator explicit_iter;
void update_explicit_pos();
-
- protected:
-
- void set_manifest(RGWObjManifest *m) {
- manifest = m;
- }
-
public:
obj_iterator() = default;
- explicit obj_iterator(RGWObjManifest *_m)
- : obj_iterator(_m, 0)
+ explicit obj_iterator(const DoutPrefixProvider *_dpp, const RGWObjManifest *_m)
+ : obj_iterator(_dpp, _m, 0)
{}
- obj_iterator(RGWObjManifest *_m, uint64_t _ofs) : manifest(_m) {
+ obj_iterator(const DoutPrefixProvider *_dpp, const RGWObjManifest *_m, uint64_t _ofs) : dpp(_dpp), manifest(_m) {
seek(_ofs);
}
void seek(uint64_t ofs);
void update_location();
- friend class RGWObjManifest;
void dump(Formatter *f) const;
}; // class obj_iterator
- const obj_iterator& obj_begin();
- const obj_iterator& obj_end();
- obj_iterator obj_find(uint64_t ofs);
-
- obj_iterator begin_iter;
- obj_iterator end_iter;
+ // Iterators are now constructed on demand rather than cached on the
+ // manifest; each carries the dpp used by the iterator's debug logging
+ // (e.g. in operator++).
+ obj_iterator obj_begin(const DoutPrefixProvider *dpp) const { return obj_iterator{dpp, this}; }
+ obj_iterator obj_end(const DoutPrefixProvider *dpp) const { return obj_iterator{dpp, this, obj_size}; }
+ // Iterator positioned at 'ofs', clamped to the object's size.
+ obj_iterator obj_find(const DoutPrefixProvider *dpp, uint64_t ofs) const {
+ return obj_iterator{dpp, this, std::min(ofs, obj_size)};
+ }
/*
* simple object generator. Using a simple single rule manifest.
return 0;
}
-int RGWObjExpStore::objexp_hint_add(const ceph::real_time& delete_at,
+int RGWObjExpStore::objexp_hint_add(const DoutPrefixProvider *dpp,
+ const ceph::real_time& delete_at,
const string& tenant_name,
const string& bucket_name,
const string& bucket_id,
string shard_name = objexp_hint_get_shardname(objexp_key_shard(obj_key, cct->_conf->rgw_objexp_hints_num_shards));
auto obj = rados_svc->obj(rgw_raw_obj(zone_svc->get_params().log_pool, shard_name));
- int r = obj.open();
+ int r = obj.open(dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl;
return r;
}
- return obj.operate(&op, null_yield);
+ return obj.operate(dpp, &op, null_yield);
}
-int RGWObjExpStore::objexp_hint_list(const string& oid,
+int RGWObjExpStore::objexp_hint_list(const DoutPrefixProvider *dpp,
+ const string& oid,
const ceph::real_time& start_time,
const ceph::real_time& end_time,
const int max_entries,
out_marker, truncated);
auto obj = rados_svc->obj(rgw_raw_obj(zone_svc->get_params().log_pool, oid));
- int r = obj.open();
+ int r = obj.open(dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl;
return r;
}
bufferlist obl;
- int ret = obj.operate(&op, &obl, null_yield);
+ int ret = obj.operate(dpp, &op, &obl, null_yield);
if ((ret < 0 ) && (ret != -ENOENT)) {
return ret;
return 0;
}
-static int cls_timeindex_trim_repeat(rgw_rados_ref ref,
+static int cls_timeindex_trim_repeat(const DoutPrefixProvider *dpp,
+ rgw_rados_ref ref,
const string& oid,
const utime_t& from_time,
const utime_t& to_time,
do {
librados::ObjectWriteOperation op;
cls_timeindex_trim(op, from_time, to_time, from_marker, to_marker);
- int r = rgw_rados_operate(ref.pool.ioctx(), oid, &op, null_yield);
+ int r = rgw_rados_operate(dpp, ref.pool.ioctx(), oid, &op, null_yield);
if (r == -ENODATA)
done = true;
else if (r < 0)
return 0;
}
-int RGWObjExpStore::objexp_hint_trim(const string& oid,
+int RGWObjExpStore::objexp_hint_trim(const DoutPrefixProvider *dpp,
+ const string& oid,
const ceph::real_time& start_time,
const ceph::real_time& end_time,
const string& from_marker,
const string& to_marker)
{
auto obj = rados_svc->obj(rgw_raw_obj(zone_svc->get_params().log_pool, oid));
- int r = obj.open();
+ int r = obj.open(dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl;
return r;
}
auto& ref = obj.get_ref();
- int ret = cls_timeindex_trim_repeat(ref, oid, utime_t(start_time), utime_t(end_time),
+ int ret = cls_timeindex_trim_repeat(dpp, ref, oid, utime_t(start_time), utime_t(end_time),
from_marker, to_marker);
if ((ret < 0 ) && (ret != -ENOENT)) {
return ret;
return;
}
-void RGWObjectExpirer::trim_chunk(const string& shard,
+void RGWObjectExpirer::trim_chunk(const DoutPrefixProvider *dpp,
+ const string& shard,
const utime_t& from,
const utime_t& to,
const string& from_marker,
const string& to_marker)
{
- ldout(store->ctx(), 20) << "trying to trim removal hints to=" << to
+ ldpp_dout(dpp, 20) << "trying to trim removal hints to=" << to
<< ", to_marker=" << to_marker << dendl;
real_time rt_from = from.to_real_time();
real_time rt_to = to.to_real_time();
- int ret = exp_store.objexp_hint_trim(shard, rt_from, rt_to,
+ int ret = exp_store.objexp_hint_trim(dpp, shard, rt_from, rt_to,
from_marker, to_marker);
if (ret < 0) {
- ldout(store->ctx(), 0) << "ERROR during trim: " << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR during trim: " << ret << dendl;
}
return;
real_time rt_start = round_start.to_real_time();
list<cls_timeindex_entry> entries;
- ret = exp_store.objexp_hint_list(shard, rt_last, rt_start,
+ ret = exp_store.objexp_hint_list(dpp, shard, rt_last, rt_start,
num_entries, marker, entries,
&out_marker, &truncated);
if (ret < 0) {
garbage_chunk(dpp, entries, need_trim);
if (need_trim) {
- trim_chunk(shard, last_run, round_start, marker, out_marker);
+ trim_chunk(dpp, shard, last_run, round_start, marker, out_marker);
}
utime_t now = ceph_clock_now();
rados_svc(_rados_svc),
zone_svc(_zone_svc) {}
- int objexp_hint_add(const ceph::real_time& delete_at,
+ int objexp_hint_add(const DoutPrefixProvider *dpp,
+ const ceph::real_time& delete_at,
const string& tenant_name,
const string& bucket_name,
const string& bucket_id,
const rgw_obj_index_key& obj_key);
- int objexp_hint_list(const string& oid,
+ int objexp_hint_list(const DoutPrefixProvider *dpp,
+ const string& oid,
const ceph::real_time& start_time,
const ceph::real_time& end_time,
const int max_entries,
string *out_marker, /* out */
bool *truncated); /* out */
- int objexp_hint_trim(const string& oid,
+ int objexp_hint_trim(const DoutPrefixProvider *dpp,
+ const string& oid,
const ceph::real_time& start_time,
const ceph::real_time& end_time,
const string& from_marker,
stop_processor();
}
- int hint_add(const ceph::real_time& delete_at,
+ int hint_add(const DoutPrefixProvider *dpp,
+ const ceph::real_time& delete_at,
const string& tenant_name,
const string& bucket_name,
const string& bucket_id,
const rgw_obj_index_key& obj_key) {
- return exp_store.objexp_hint_add(delete_at, tenant_name, bucket_name,
+ return exp_store.objexp_hint_add(dpp, delete_at, tenant_name, bucket_name,
bucket_id, obj_key);
}
std::list<cls_timeindex_entry>& entries, /* in */
bool& need_trim); /* out */
- void trim_chunk(const std::string& shard,
+ void trim_chunk(const DoutPrefixProvider *dpp,
+ const std::string& shard,
const utime_t& from,
const utime_t& to,
const string& from_marker,
return r;
}
-static int decode_policy(CephContext *cct,
+static int decode_policy(const DoutPrefixProvider *dpp,
+ CephContext *cct,
bufferlist& bl,
RGWAccessControlPolicy *policy)
{
try {
policy->decode(iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
return -EIO;
}
if (cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
- ldout(cct, 15) << __func__ << " Read AccessControlPolicy";
+ ldpp_dout(dpp, 15) << __func__ << " Read AccessControlPolicy";
RGWAccessControlPolicy_S3 *s3policy = static_cast<RGWAccessControlPolicy_S3 *>(policy);
s3policy->to_xml(*_dout);
*_dout << dendl;
}
-static int get_user_policy_from_attr(CephContext * const cct,
+static int get_user_policy_from_attr(const DoutPrefixProvider *dpp,
+ CephContext * const cct,
map<string, bufferlist>& attrs,
RGWAccessControlPolicy& policy /* out */)
{
auto aiter = attrs.find(RGW_ATTR_ACL);
if (aiter != attrs.end()) {
- int ret = decode_policy(cct, aiter->second, &policy);
+ int ret = decode_policy(dpp, cct, aiter->second, &policy);
if (ret < 0) {
return ret;
}
map<string, bufferlist>::iterator aiter = bucket_attrs.find(RGW_ATTR_ACL);
if (aiter != bucket_attrs.end()) {
- int ret = decode_policy(cct, aiter->second, policy);
+ int ret = decode_policy(dpp, cct, aiter->second, policy);
if (ret < 0)
return ret;
} else {
ret = rop->get_attr(dpp, RGW_ATTR_ACL, bl, y);
if (ret >= 0) {
- ret = decode_policy(cct, bl, policy);
+ ret = decode_policy(dpp, cct, bl, policy);
if (ret < 0)
return ret;
} else if (ret == -ENODATA) {
ret = acl_user->read_attrs(dpp, y);
if (!ret) {
- ret = get_user_policy_from_attr(s->cct, acl_user->get_attrs(), *s->user_acl);
+ ret = get_user_policy_from_attr(dpp, s->cct, acl_user->get_attrs(), *s->user_acl);
}
if (-ENOENT == ret) {
/* In already existing clusters users won't have ACL. In such case
else ret = -EACCES;
}
} catch (const std::exception& e) {
- lderr(s->cct) << "Error reading IAM User Policy: " << e.what() << dendl;
+ ldpp_dout(dpp, -1) << "Error reading IAM User Policy: " << e.what() << dendl;
ret = -EACCES;
}
}
void RGWPutBucketTags::execute(optional_yield y)
{
- op_ret = get_params(y);
+ op_ret = get_params(this, y);
if (op_ret < 0)
return;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
}
void RGWDeleteBucketTags::execute(optional_yield y)
{
bufferlist in_data;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
if (op_ret < 0)
return;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
void RGWDeleteBucketReplication::execute(optional_yield y)
{
bufferlist in_data;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
}
- op_ret = rgw_policy_from_attrset(s->cct, part->get_attrs(), &obj_policy);
+ op_ret = rgw_policy_from_attrset(s, s->cct, part->get_attrs(), &obj_policy);
if (op_ret < 0)
return op_ret;
bufferlist::const_iterator iter{&tags->second};
s->tagset.decode(iter);
} catch (buffer::error& err) {
- ldout(s->cct, 0)
+ ldpp_dout(s, 0)
<< "ERROR: caught buffer::error, couldn't decode TagSet" << dendl;
}
}
RGWUsageIter usage_iter;
while (s->bucket && is_truncated) {
- op_ret = s->bucket->read_usage(start_epoch, end_epoch, max_entries, &is_truncated,
+ op_ret = s->bucket->read_usage(this, start_epoch, end_epoch, max_entries, &is_truncated,
usage_iter, usage);
if (op_ret == -ENOENT) {
op_ret = 0;
return;
}
- op_ret = s->user->read_stats(y, &stats);
+ op_ret = s->user->read_stats(this, y, &stats);
if (op_ret < 0) {
ldpp_dout(this, 0) << "ERROR: can't read user header" << dendl;
return;
}
if (!lastmarker) {
- lderr(s->cct) << "ERROR: rgw_read_user_buckets, stasis at marker="
+ ldpp_dout(this, -1) << "ERROR: rgw_read_user_buckets, stasis at marker="
<< marker << " uid=" << s->user->get_id() << dendl;
break;
}
}
}
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
if (op_ret < 0)
return;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << " forward_request_to_master returned ret=" << op_ret << dendl;
return;
{
bufferlist in_data;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "NOTICE: forward_to_master failed on bucket=" << s->bucket->get_name()
<< "returned err=" << op_ret << dendl;
if (need_metadata_upload()) {
/* It's supposed that following functions WILL NOT change any special
* attributes (like RGW_ATTR_ACL) if they are already present in attrs. */
- op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
+ op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false);
if (op_ret < 0) {
return;
}
attrs.clear();
- op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
+ op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false);
if (op_ret < 0) {
return;
}
}
}
- op_ret = s->bucket->sync_user_stats(y);
+ op_ret = s->bucket->sync_user_stats(this, y);
if ( op_ret < 0) {
ldpp_dout(this, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
}
}
bufferlist in_data;
- op_ret = store->forward_request_to_master(s->user.get(), &ot.read_version, in_data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), &ot.read_version, in_data, nullptr, s->info, y);
if (op_ret < 0) {
if (op_ret == -ENOENT) {
/* adjust error, we want to return with NoSuchBucket and not
// make reservation for notification if needed
std::unique_ptr<rgw::sal::Notification> res = store->get_notification(s->object.get(),
s, rgw::notify::ObjectCreatedPut);
- op_ret = res->publish_reserve(obj_tags.get());
+ op_ret = res->publish_reserve(this, obj_tags.get());
if (op_ret < 0) {
return;
}
emplace_attr(RGW_ATTR_ETAG, std::move(bl));
populate_with_generic_attrs(s, attrs);
- op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
+ op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs);
if (op_ret < 0) {
return;
}
// make reservation for notification if needed
std::unique_ptr<rgw::sal::Notification> res = store->get_notification(s->object.get(), s, rgw::notify::ObjectCreatedPost);
- op_ret = res->publish_reserve();
+ op_ret = res->publish_reserve(this);
if (op_ret < 0) {
return;
}
attrs.emplace(RGW_ATTR_ACL, std::move(acl_bl));
}
- op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
+ op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false);
if (op_ret < 0) {
return op_ret;
}
return;
}
- op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
+ op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false);
if (op_ret < 0) {
return;
}
return;
}
- op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
+ op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs);
if (op_ret < 0) {
return;
}
rgw::notify::ObjectRemovedDeleteMarkerCreated : rgw::notify::ObjectRemovedDelete;
std::unique_ptr<rgw::sal::Notification> res = store->get_notification(s->object.get(),
s, event_type);
- op_ret = res->publish_reserve();
+ op_ret = res->publish_reserve(this);
if (op_ret < 0) {
return;
}
dest_policy.encode(aclbl);
emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
- op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
+ op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs);
if (op_ret < 0) {
return op_ret;
}
// make reservation for notification if needed
std::unique_ptr<rgw::sal::Notification> res = store->get_notification(s->object.get(),
s, rgw::notify::ObjectCreatedCopy);
- op_ret = res->publish_reserve();
+ op_ret = res->publish_reserve(this);
if (op_ret < 0) {
return;
}
if (s->canned_acl.empty()) {
in_data.append(data);
}
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
if (s->bucket_access_conf &&
s->bucket_access_conf->block_public_acls() &&
- new_policy.is_public()) {
+ new_policy.is_public(this)) {
op_ret = -EACCES;
return;
}
ldpp_dout(this, 15) << "New LifecycleConfiguration:" << ss.str() << dendl;
}
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
void RGWDeleteLC::execute(optional_yield y)
{
bufferlist data;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
if (op_ret < 0)
return;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
void RGWDeleteCORS::execute(optional_yield y)
{
bufferlist data;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
void RGWSetRequestPayment::execute(optional_yield y)
{
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
if (op_ret != 0)
return;
- op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
+ op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs);
if (op_ret < 0) {
return;
}
// make reservation for notification if needed
std::unique_ptr<rgw::sal::Notification> res = store->get_notification(s->object.get(),
s, rgw::notify::ObjectCreatedPost);
- op_ret = res->publish_reserve();
+ op_ret = res->publish_reserve(this);
if (op_ret < 0) {
return;
}
// make reservation for notification if needed
std::unique_ptr<rgw::sal::Notification> res = store->get_notification(s->object.get(),
s, rgw::notify::ObjectCreatedCompleteMultipartUpload);
- op_ret = res->publish_reserve();
+ op_ret = res->publish_reserve(this);
if (op_ret < 0) {
return;
}
s->cct->_conf.get_val<int64_t>("rgw_mp_lock_max_time");
utime_t dur(max_lock_secs_mp, 0);
- serializer = meta_obj->get_serializer("RGWCompleteMultipart");
- op_ret = serializer->try_lock(dur, y);
+ serializer = meta_obj->get_serializer(this, "RGWCompleteMultipart");
+ op_ret = serializer->try_lock(this, dur, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "failed to acquire lock" << dendl;
- if (op_ret == -ENOENT && check_previously_completed(this, parts)) {
+ if (op_ret == -ENOENT && check_previously_completed(parts)) {
ldpp_dout(this, 1) << "NOTICE: This multipart completion is already completed" << dendl;
op_ret = 0;
return;
attrs = meta_obj->get_attrs();
do {
- op_ret = list_multipart_parts(s, upload_id, meta_oid, max_parts,
+ op_ret = list_multipart_parts(this, s, upload_id, meta_oid, max_parts,
marker, obj_parts, &marker, &truncated);
if (op_ret == -ENOENT) {
op_ret = -ERR_NO_SUCH_UPLOAD;
op_ret = -ERR_INVALID_PART;
return;
} else {
- manifest.append(obj_part.manifest, store->get_zone());
+ manifest.append(this, obj_part.manifest, store->get_zone());
}
bool part_compressed = (obj_part.cs_info.compression_type != "none");
}
}
-bool RGWCompleteMultipart::check_previously_completed(const DoutPrefixProvider* dpp, const RGWMultiCompleteUpload* parts)
+bool RGWCompleteMultipart::check_previously_completed(const RGWMultiCompleteUpload* parts)
{
// re-calculate the etag from the parts and compare to the existing object
s->object->set_bucket(s->bucket.get());
int ret = s->object->get_obj_attrs(s->obj_ctx, s->yield, this);
if (ret < 0) {
- ldpp_dout(dpp, 0) << __func__ << "() ERROR: get_obj_attrs() returned ret=" << ret << dendl;
+ ldpp_dout(this, 0) << __func__ << "() ERROR: get_obj_attrs() returned ret=" << ret << dendl;
return false;
}
rgw::sal::Attrs sattrs = s->object->get_attrs();
char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
hex_to_buf(partetag.c_str(), petag, CEPH_CRYPTO_MD5_DIGESTSIZE);
hash.Update((const unsigned char *)petag, sizeof(petag));
- ldpp_dout(dpp, 20) << __func__ << "() re-calculating multipart etag: part: "
+ ldpp_dout(this, 20) << __func__ << "() re-calculating multipart etag: part: "
<< index << ", etag: " << partetag << dendl;
}
"-%lld", (long long)parts->parts.size());
if (oetag.compare(final_etag_str) != 0) {
- ldpp_dout(dpp, 1) << __func__ << "() NOTICE: etag mismatch: object etag:"
+ ldpp_dout(this, 1) << __func__ << "() NOTICE: etag mismatch: object etag:"
<< oetag << ", re-calculated etag:" << final_etag_str << dendl;
return false;
}
- ldpp_dout(dpp, 5) << __func__ << "() object etag and re-calculated etag match, etag: " << oetag << dendl;
+ ldpp_dout(this, 5) << __func__ << "() object etag and re-calculated etag match, etag: " << oetag << dendl;
return true;
}
if (op_ret < 0)
return;
- op_ret = list_multipart_parts(s, upload_id, meta_oid, max_parts,
+ op_ret = list_multipart_parts(this, s, upload_id, meta_oid, max_parts,
marker, parts, NULL, &truncated);
}
rgw::notify::ObjectRemovedDeleteMarkerCreated : rgw::notify::ObjectRemovedDelete;
std::unique_ptr<rgw::sal::Notification> res = store->get_notification(obj.get(),
s, event_type);
- op_ret = res->publish_reserve();
+ op_ret = res->publish_reserve(this);
if (op_ret < 0) {
send_partial_response(*iter, false, "", op_ret);
continue;
{
int ret = rgw_build_bucket_policies(dpp, store, s, y);
if (ret < 0) {
- ldpp_dout(s, 10) << "init_permissions on " << s->bucket
+ ldpp_dout(dpp, 10) << "init_permissions on " << s->bucket
<< " failed, ret=" << ret << dendl;
return ret==-ENODATA ? -EACCES : ret;
}
return;
}
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
try {
RGWXMLDecoder::decode_xml("ObjectLockConfiguration", obj_lock, &parser, true);
} catch (RGWXMLDecoder::err& err) {
- ldout(s->cct, 5) << "unexpected xml:" << err << dendl;
+ ldpp_dout(this, 5) << "unexpected xml:" << err << dendl;
op_ret = -ERR_MALFORMED_XML;
return;
}
return;
}
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
- ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
+ ldpp_dout(this, 20) << __func__ << "(): forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
try {
obj_retention.decode(iter);
} catch (const buffer::error& e) {
- ldout(s->cct, 0) << __func__ << "decode object retention config failed" << dendl;
+ ldpp_dout(this, 0) << __func__ << "(): decode object retention config failed" << dendl;
op_ret = -EIO;
return;
}
try {
RGWXMLDecoder::decode_xml("LegalHold", obj_legal_hold, &parser, true);
} catch (RGWXMLDecoder::err &err) {
- ldout(s->cct, 5) << "unexpected xml:" << err << dendl;
+ ldpp_dout(this, 5) << "unexpected xml:" << err << dendl;
op_ret = -ERR_MALFORMED_XML;
return;
}
try {
obj_legal_hold.decode(iter);
} catch (const buffer::error& e) {
- ldout(s->cct, 0) << __func__ << "decode object legal hold config failed" << dendl;
+ ldpp_dout(this, 0) << __func__ << "(): decode object legal hold config failed" << dendl;
op_ret = -EIO;
return;
}
void RGWGetBucketPolicyStatus::execute(optional_yield y)
{
- isPublic = (s->iam_policy && rgw::IAM::is_public(*s->iam_policy)) || s->bucket_acl->is_public();
+ isPublic = (s->iam_policy && rgw::IAM::is_public(*s->iam_policy)) || s->bucket_acl->is_public(this);
}
int RGWPutBucketPublicAccessBlock::verify_permission(optional_yield y)
return;
}
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
void execute(optional_yield y) override;
virtual void send_response() override = 0;
- virtual int get_params(optional_yield y) = 0;
+ virtual int get_params(const DoutPrefixProvider *dpp, optional_yield y) = 0;
const char* name() const override { return "put_bucket_tags"; }
virtual uint32_t op_mask() override { return RGW_OP_TYPE_WRITE; }
RGWOpType get_type() override { return RGW_OP_PUT_BUCKET_TAGGING; }
int verify_permission(optional_yield y) override;
void pre_exec() override;
void execute(optional_yield y) override;
- bool check_previously_completed(const DoutPrefixProvider* dpp, const RGWMultiCompleteUpload* parts);
+ bool check_previously_completed(const RGWMultiCompleteUpload* parts);
void complete() override;
virtual int get_params(optional_yield y) = 0;
string err;
*olh_epoch = strict_strtol(epoch_str.c_str(), 10, &err);
if (!err.empty()) {
- lsubdout(s->cct, rgw, 0) << "failed to parse versioned-epoch param"
+ ldpp_subdout(s, rgw, 0) << "failed to parse versioned-epoch param"
<< dendl;
return -EINVAL;
}
* On failure returns a negative error code.
*
*/
-inline int rgw_get_request_metadata(CephContext* const cct,
+inline int rgw_get_request_metadata(const DoutPrefixProvider *dpp,
+ CephContext* const cct,
struct req_info& info,
std::map<std::string, ceph::bufferlist>& attrs,
const bool allow_empty_attrs = true)
std::string& xattr = kv.second;
if (blocklisted_headers.count(name) == 1) {
- lsubdout(cct, rgw, 10) << "skipping x>> " << name << dendl;
+ ldpp_subdout(dpp, rgw, 10) << "skipping x>> " << name << dendl;
continue;
} else if (allow_empty_attrs || !xattr.empty()) {
- lsubdout(cct, rgw, 10) << "x>> " << name << ":" << xattr << dendl;
+ ldpp_subdout(dpp, rgw, 10) << "x>> " << name << ":" << xattr << dendl;
format_xattr(xattr);
std::string attr_name(RGW_ATTR_PREFIX);
return 0;
}
-int RGWOrphanStore::init()
+int RGWOrphanStore::init(const DoutPrefixProvider *dpp)
{
const rgw_pool& log_pool = store->get_zone()->get_params().log_pool;
- int r = rgw_init_ioctx(static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_rados_handle(), log_pool, ioctx);
+ int r = rgw_init_ioctx(dpp, static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_rados_handle(), log_pool, ioctx);
if (r < 0) {
cerr << "ERROR: failed to open log pool (" << log_pool << " ret=" << r << std::endl;
return r;
return 0;
}
-int RGWOrphanStore::store_entries(const string& oid, const map<string, bufferlist>& entries)
+int RGWOrphanStore::store_entries(const DoutPrefixProvider *dpp, const string& oid, const map<string, bufferlist>& entries)
{
librados::ObjectWriteOperation op;
op.omap_set(entries);
cout << "storing " << entries.size() << " entries at " << oid << std::endl;
- ldout(store->ctx(), 20) << "storing " << entries.size() << " entries at " << oid << ": " << dendl;
+ ldpp_dout(dpp, 20) << "storing " << entries.size() << " entries at " << oid << ": " << dendl;
for (map<string, bufferlist>::const_iterator iter = entries.begin(); iter != entries.end(); ++iter) {
- ldout(store->ctx(), 20) << " > " << iter->first << dendl;
+ ldpp_dout(dpp, 20) << " > " << iter->first << dendl;
}
- int ret = rgw_rados_operate(ioctx, oid, &op, null_yield);
+ int ret = rgw_rados_operate(dpp, ioctx, oid, &op, null_yield);
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: " << __func__ << "(" << oid << ") returned ret=" << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: " << __func__ << "(" << oid << ") returned ret=" << ret << dendl;
}
return 0;
return 0;
}
-int RGWOrphanSearch::init(const string& job_name, RGWOrphanSearchInfo *info, bool _detailed_mode)
+int RGWOrphanSearch::init(const DoutPrefixProvider *dpp, const string& job_name, RGWOrphanSearchInfo *info, bool _detailed_mode)
{
- int r = orphan_store.init();
+ int r = orphan_store.init(dpp);
if (r < 0) {
return r;
}
RGWOrphanSearchState state;
r = orphan_store.read_job(job_name, state);
if (r < 0 && r != -ENOENT) {
- lderr(store->ctx()) << "ERROR: failed to read state ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to read state ret=" << r << dendl;
return r;
}
r = save_state();
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed to write state ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to write state ret=" << r << dendl;
return r;
}
} else {
- lderr(store->ctx()) << "ERROR: job not found" << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: job not found" << dendl;
return r;
}
return 0;
}
-int RGWOrphanSearch::log_oids(map<int, string>& log_shards, map<int, list<string> >& oids)
+int RGWOrphanSearch::log_oids(const DoutPrefixProvider *dpp, map<int, string>& log_shards, map<int, list<string> >& oids)
{
map<int, list<string> >::iterator miter = oids.begin();
map<string, bufferlist> entries;
#define MAX_OMAP_SET_ENTRIES 100
for (int j = 0; cur != end && j != MAX_OMAP_SET_ENTRIES; ++cur, ++j) {
- ldout(store->ctx(), 20) << "adding obj: " << *cur << dendl;
+ ldpp_dout(dpp, 20) << "adding obj: " << *cur << dendl;
entries[*cur] = bufferlist();
}
- int ret = orphan_store.store_entries(cur_info.oid, entries);
+ int ret = orphan_store.store_entries(dpp, cur_info.oid, entries);
if (ret < 0) {
return ret;
}
return 0;
}
-int RGWOrphanSearch::build_all_oids_index()
+int RGWOrphanSearch::build_all_oids_index(const DoutPrefixProvider *dpp)
{
librados::IoCtx ioctx;
- int ret = rgw_init_ioctx(static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_rados_handle(), search_info.pool, ioctx);
+ int ret = rgw_init_ioctx(dpp, static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_rados_handle(), search_info.pool, ioctx);
if (ret < 0) {
- lderr(store->ctx()) << __func__ << ": rgw_init_ioctx() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, -1) << __func__ << ": rgw_init_ioctx() returned ret=" << ret << dendl;
return ret;
}
++total;
if (++count >= COUNT_BEFORE_FLUSH) {
ldout(store->ctx(), 1) << "iterated through " << total << " objects" << dendl;
- ret = log_oids(all_objs_index, oids);
+ ret = log_oids(dpp, all_objs_index, oids);
if (ret < 0) {
cerr << __func__ << ": ERROR: log_oids() returned ret=" << ret << std::endl;
return ret;
oids.clear();
}
}
- ret = log_oids(all_objs_index, oids);
+ ret = log_oids(dpp, all_objs_index, oids);
if (ret < 0) {
cerr << __func__ << ": ERROR: log_oids() returned ret=" << ret << std::endl;
return ret;
return 0;
}
-int RGWOrphanSearch::build_buckets_instance_index()
+int RGWOrphanSearch::build_buckets_instance_index(const DoutPrefixProvider *dpp)
{
void *handle;
int max = 1000;
string section = "bucket.instance";
- int ret = store->meta_list_keys_init(section, string(), &handle);
+ int ret = store->meta_list_keys_init(dpp, section, string(), &handle);
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: can't get key: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: can't get key: " << cpp_strerror(-ret) << dendl;
return ret;
}
list<string> keys;
ret = store->meta_list_keys_next(handle, max, keys, &truncated);
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << dendl;
return ret;
}
for (list<string>::iterator iter = keys.begin(); iter != keys.end(); ++iter) {
++total;
- ldout(store->ctx(), 10) << "bucket_instance=" << *iter << " total=" << total << dendl;
+ ldpp_dout(dpp, 10) << "bucket_instance=" << *iter << " total=" << total << dendl;
int shard = orphan_shard(*iter);
instances[shard].push_back(*iter);
if (++count >= COUNT_BEFORE_FLUSH) {
- ret = log_oids(buckets_instance_index, instances);
+ ret = log_oids(dpp, buckets_instance_index, instances);
if (ret < 0) {
- lderr(store->ctx()) << __func__ << ": ERROR: log_oids() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, -1) << __func__ << ": ERROR: log_oids() returned ret=" << ret << dendl;
return ret;
}
count = 0;
store->meta_list_keys_complete(handle);
- ret = log_oids(buckets_instance_index, instances);
+ ret = log_oids(dpp, buckets_instance_index, instances);
if (ret < 0) {
- lderr(store->ctx()) << __func__ << ": ERROR: log_oids() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, -1) << __func__ << ": ERROR: log_oids() returned ret=" << ret << dendl;
return ret;
}
return 0;
}
-int RGWOrphanSearch::handle_stat_result(map<int, list<string> >& oids, rgw::sal::Object::StatOp::Result& result)
+int RGWOrphanSearch::handle_stat_result(const DoutPrefixProvider *dpp, map<int, list<string> >& oids, rgw::sal::Object::StatOp::Result& result)
{
set<string> obj_oids;
rgw::sal::Bucket* bucket = result.obj->get_bucket();
if (!detailed_mode &&
manifest.get_obj_size() <= manifest.get_head_size()) {
- ldout(store->ctx(), 5) << "skipping object as it fits in a head" << dendl;
+ ldpp_dout(dpp, 5) << "skipping object as it fits in a head" << dendl;
return 0;
}
RGWObjManifest::obj_iterator miter;
- for (miter = manifest.obj_begin(); miter != manifest.obj_end(); ++miter) {
+ for (miter = manifest.obj_begin(dpp); miter != manifest.obj_end(dpp); ++miter) {
const rgw_raw_obj& loc = miter.get_location().get_raw_obj(store);
string s = loc.oid;
obj_oids.insert(obj_fingerprint(s));
}
for (set<string>::iterator iter = obj_oids.begin(); iter != obj_oids.end(); ++iter) {
- ldout(store->ctx(), 20) << __func__ << ": oid for obj=" << result.obj << ": " << *iter << dendl;
+ ldpp_dout(dpp, 20) << __func__ << ": oid for obj=" << result.obj << ": " << *iter << dendl;
int shard = orphan_shard(*iter);
oids[shard].push_back(*iter);
return 0;
}
-int RGWOrphanSearch::pop_and_handle_stat_op(map<int, list<string> >& oids, std::deque<std::unique_ptr<rgw::sal::Object::StatOp>>& ops)
+int RGWOrphanSearch::pop_and_handle_stat_op(const DoutPrefixProvider *dpp, map<int, list<string> >& oids, std::deque<std::unique_ptr<rgw::sal::Object::StatOp>>& ops)
{
rgw::sal::Object::StatOp* front_op = ops.front().get();
int ret = front_op->wait();
if (ret < 0) {
if (ret != -ENOENT) {
- lderr(store->ctx()) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl;
}
goto done;
}
- ret = handle_stat_result(oids, front_op->result);
+ ret = handle_stat_result(dpp, oids, front_op->result);
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: handle_stat_response() returned error: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: handle_stat_response() returned error: " << cpp_strerror(-ret) << dendl;
}
done:
ops.pop_front();
stat_ops.push_back(std::move(stat_op));
- ret = op->stat_async();
+ ret = op->stat_async(dpp);
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl;
return ret;
}
if (stat_ops.size() >= max_concurrent_ios) {
- ret = pop_and_handle_stat_op(oids, stat_ops);
+ ret = pop_and_handle_stat_op(dpp, oids, stat_ops);
if (ret < 0) {
if (ret != -ENOENT) {
- lderr(store->ctx()) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl;
}
}
}
if (oids.size() >= COUNT_BEFORE_FLUSH) {
- ret = log_oids(linked_objs_index, oids);
+ ret = log_oids(dpp, linked_objs_index, oids);
if (ret < 0) {
cerr << __func__ << ": ERROR: log_oids() returned ret=" << ret << std::endl;
return ret;
} while (results.is_truncated);
while (!stat_ops.empty()) {
- ret = pop_and_handle_stat_op(oids, stat_ops);
+ ret = pop_and_handle_stat_op(dpp, oids, stat_ops);
if (ret < 0) {
if (ret != -ENOENT) {
- lderr(store->ctx()) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl;
}
}
}
}
if (ret < 0) {
- lderr(store->ctx()) << __func__ << ": ERROR: read_entries() oid=" << oid << " returned ret=" << ret << dendl;
+ ldpp_dout(dpp, -1) << __func__ << ": ERROR: read_entries() oid=" << oid << " returned ret=" << ret << dendl;
return ret;
}
}
for (map<string, bufferlist>::iterator eiter = entries.begin(); eiter != entries.end(); ++eiter) {
- ldout(store->ctx(), 20) << " indexed entry: " << eiter->first << dendl;
+ ldpp_dout(dpp, 20) << " indexed entry: " << eiter->first << dendl;
ret = build_linked_oids_for_bucket(dpp, eiter->first, oids);
if (ret < 0) {
- lderr(store->ctx()) << __func__ << ": ERROR: build_linked_oids_for_bucket() indexed entry=" << eiter->first
+ ldpp_dout(dpp, -1) << __func__ << ": ERROR: build_linked_oids_for_bucket() indexed entry=" << eiter->first
<< " returned ret=" << ret << dendl;
return ret;
}
search_stage.marker.clear();
}
- int ret = log_oids(linked_objs_index, oids);
+ int ret = log_oids(dpp, linked_objs_index, oids);
if (ret < 0) {
cerr << __func__ << ": ERROR: log_oids() returned ret=" << ret << std::endl;
return ret;
return get_next(key, pbl, done);
}
-int RGWOrphanSearch::compare_oid_indexes()
+int RGWOrphanSearch::compare_oid_indexes(const DoutPrefixProvider *dpp)
{
ceph_assert(linked_objs_index.size() == all_objs_index.size());
librados::IoCtx data_ioctx;
- int ret = rgw_init_ioctx(static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_rados_handle(), search_info.pool, data_ioctx);
+ int ret = rgw_init_ioctx(dpp, static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_rados_handle(), search_info.pool, data_ioctx);
if (ret < 0) {
- lderr(store->ctx()) << __func__ << ": rgw_init_ioctx() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, -1) << __func__ << ": rgw_init_ioctx() returned ret=" << ret << dendl;
return ret;
}
}
if (cur_linked == key_fp) {
- ldout(store->ctx(), 20) << "linked: " << key << dendl;
+ ldpp_dout(dpp, 20) << "linked: " << key << dendl;
continue;
}
r = data_ioctx.stat(key, NULL, &mtime);
if (r < 0) {
if (r != -ENOENT) {
- lderr(store->ctx()) << "ERROR: ioctx.stat(" << key << ") returned ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: ioctx.stat(" << key << ") returned ret=" << r << dendl;
}
continue;
}
if (stale_secs && (uint64_t)mtime >= time_threshold) {
- ldout(store->ctx(), 20) << "skipping: " << key << " (mtime=" << mtime << " threshold=" << time_threshold << ")" << dendl;
+ ldpp_dout(dpp, 20) << "skipping: " << key << " (mtime=" << mtime << " threshold=" << time_threshold << ")" << dendl;
continue;
}
- ldout(store->ctx(), 20) << "leaked: " << key << dendl;
+ ldpp_dout(dpp, 20) << "leaked: " << key << dendl;
cout << "leaked: " << key << std::endl;
} while (!done);
}
search_stage = RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_LSPOOL);
r = save_state();
if (r < 0) {
- lderr(store->ctx()) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl;
return r;
}
// fall through
case ORPHAN_SEARCH_STAGE_LSPOOL:
ldpp_dout(dpp, 0) << __func__ << "(): building index of all objects in pool" << dendl;
- r = build_all_oids_index();
+ r = build_all_oids_index(dpp);
if (r < 0) {
- lderr(store->ctx()) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl;
return r;
}
search_stage = RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_LSBUCKETS);
r = save_state();
if (r < 0) {
- lderr(store->ctx()) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl;
return r;
}
// fall through
case ORPHAN_SEARCH_STAGE_LSBUCKETS:
ldpp_dout(dpp, 0) << __func__ << "(): building index of all bucket indexes" << dendl;
- r = build_buckets_instance_index();
+ r = build_buckets_instance_index(dpp);
if (r < 0) {
- lderr(store->ctx()) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << __func__ << ": ERROR: build_buckets_instance_index returned ret=" << r << dendl;
return r;
}
search_stage = RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_ITERATE_BI);
r = save_state();
if (r < 0) {
- lderr(store->ctx()) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl;
return r;
}
// fall through
case ORPHAN_SEARCH_STAGE_ITERATE_BI:
- ldout(store->ctx(), 0) << __func__ << "(): building index of all linked objects" << dendl;
+ ldpp_dout(dpp, 0) << __func__ << "(): building index of all linked objects" << dendl;
r = build_linked_oids_index(dpp);
if (r < 0) {
- lderr(store->ctx()) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << __func__ << ": ERROR: build_linked_oids_index returned ret=" << r << dendl;
return r;
}
search_stage = RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_COMPARE);
r = save_state();
if (r < 0) {
- lderr(store->ctx()) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl;
return r;
}
// fall through
case ORPHAN_SEARCH_STAGE_COMPARE:
- r = compare_oid_indexes();
+ r = compare_oid_indexes(dpp);
if (r < 0) {
- lderr(store->ctx()) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << __func__ << ": ERROR: compare_oid_indexes returned ret=" << r << dendl;
return r;
}
}
-int RGWRadosList::handle_stat_result(rgw::sal::Object::StatOp::Result& result,
+int RGWRadosList::handle_stat_result(const DoutPrefixProvider *dpp,
+ rgw::sal::Object::StatOp::Result& result,
std::string& bucket_name,
rgw_obj_key& obj_key,
std::set<string>& obj_oids)
rgw::sal::Bucket* bucket = result.obj->get_bucket();
- ldout(store->ctx(), 20) << "RGWRadosList::" << __func__ <<
+ ldpp_dout(dpp, 20) << "RGWRadosList::" << __func__ <<
" bucket=" << bucket <<
", has_manifest=" << !!result.manifest <<
dendl;
// iterator to store result of dlo/slo attribute find
auto attr_it = result.obj->get_attrs().end();
const std::string oid = bucket->get_marker() + "_" + result.obj->get_oid();
- ldout(store->ctx(), 20) << "radoslist processing object=\"" <<
+ ldpp_dout(dpp, 20) << "radoslist processing object=\"" <<
oid << "\"" << dendl;
if (visited_oids.find(oid) != visited_oids.end()) {
// apparently we hit a loop; don't continue with this oid
- ldout(store->ctx(), 15) <<
+ ldpp_dout(dpp, 15) <<
"radoslist stopped loop at already visited object=\"" <<
oid << "\"" << dendl;
return 0;
obj_oids.insert(oid);
visited_oids.insert(oid); // prevent dlo loops
- ldout(store->ctx(), 15) << "radoslist added to visited list DLO=\"" <<
+ ldpp_dout(dpp, 15) << "radoslist added to visited list DLO=\"" <<
oid << "\"" << dendl;
char* prefix_path_c = attr_it->second.c_str();
const std::string prefix = prefix_path.substr(sep_pos + 1);
add_bucket_prefix(bucket_name, prefix);
- ldout(store->ctx(), 25) << "radoslist DLO oid=\"" << oid <<
+ ldpp_dout(dpp, 25) << "radoslist DLO oid=\"" << oid <<
"\" added bucket=\"" << bucket_name << "\" prefix=\"" <<
prefix << "\" to process list" << dendl;
} else if ((attr_it = result.obj->get_attrs().find(RGW_ATTR_SLO_MANIFEST)) !=
obj_oids.insert(oid);
visited_oids.insert(oid); // prevent slo loops
- ldout(store->ctx(), 15) << "radoslist added to visited list SLO=\"" <<
+ ldpp_dout(dpp, 15) << "radoslist added to visited list SLO=\"" <<
oid << "\"" << dendl;
RGWSLOInfo slo_info;
try {
::decode(slo_info, bliter);
} catch (buffer::error& err) {
- ldout(store->ctx(), 0) <<
+ ldpp_dout(dpp, 0) <<
"ERROR: failed to decode slo manifest for " << oid << dendl;
return -EIO;
}
const rgw_obj_key obj_key(obj_name);
add_bucket_filter(bucket_name, obj_key);
- ldout(store->ctx(), 25) << "radoslist SLO oid=\"" << oid <<
+ ldpp_dout(dpp, 25) << "radoslist SLO oid=\"" << oid <<
"\" added bucket=\"" << bucket_name << "\" obj_key=\"" <<
obj_key << "\" to process list" << dendl;
}
// manifest AND empty objects have no manifest, but they're
// realized as empty rados objects
if (0 == manifest.get_max_head_size() ||
- manifest.obj_begin() == manifest.obj_end()) {
+ manifest.obj_begin(dpp) == manifest.obj_end(dpp)) {
obj_oids.insert(oid);
// first_insert = true;
}
RGWObjManifest::obj_iterator miter;
- for (miter = manifest.obj_begin(); miter != manifest.obj_end(); ++miter) {
+ for (miter = manifest.obj_begin(dpp); miter != manifest.obj_end(dpp); ++miter) {
const rgw_raw_obj& loc =
miter.get_location().get_raw_obj(store);
string s = loc.oid;
} // RGWRadosList::handle_stat_result
int RGWRadosList::pop_and_handle_stat_op(
+ const DoutPrefixProvider *dpp,
RGWObjectCtx& obj_ctx,
std::deque<std::unique_ptr<rgw::sal::Object::StatOp>>& ops)
{
int ret = front_op->wait();
if (ret < 0) {
if (ret != -ENOENT) {
- lderr(store->ctx()) << "ERROR: stat_async() returned error: " <<
+ ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " <<
cpp_strerror(-ret) << dendl;
}
goto done;
}
- ret = handle_stat_result(front_op->result, bucket_name, obj_key, obj_oids);
+ ret = handle_stat_result(dpp, front_op->result, bucket_name, obj_key, obj_oids);
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: handle_stat_result() returned error: " <<
+ ldpp_dout(dpp, -1) << "ERROR: handle_stat_result() returned error: " <<
cpp_strerror(-ret) << dendl;
}
objs.push_back(std::move(obj));
stat_ops.push_back(std::move(stat_op));
- ret = op->stat_async();
+ ret = op->stat_async(dpp);
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: stat_async() returned error: " <<
+ ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " <<
cpp_strerror(-ret) << dendl;
return ret;
}
if (stat_ops.size() >= max_concurrent_ios) {
- ret = pop_and_handle_stat_op(obj_ctx, stat_ops);
+ ret = pop_and_handle_stat_op(dpp, obj_ctx, stat_ops);
if (ret < 0) {
if (ret != -ENOENT) {
- lderr(store->ctx()) <<
+ ldpp_dout(dpp, -1) <<
"ERROR: pop_and_handle_stat_op() returned error: " <<
cpp_strerror(-ret) << dendl;
}
} while (results.is_truncated);
while (!stat_ops.empty()) {
- ret = pop_and_handle_stat_op(obj_ctx, stat_ops);
+ ret = pop_and_handle_stat_op(dpp, obj_ctx, stat_ops);
if (ret < 0) {
if (ret != -ENOENT) {
- lderr(store->ctx()) << "ERROR: stat_async() returned error: " <<
+ ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " <<
cpp_strerror(-ret) << dendl;
}
}
int ret;
void* handle = nullptr;
- ret = store->meta_list_keys_init("bucket", string(), &handle);
+ ret = store->meta_list_keys_init(dpp, "bucket", string(), &handle);
if (ret < 0) {
- lderr(store->ctx()) << "RGWRadosList::" << __func__ <<
+ ldpp_dout(dpp, -1) << "RGWRadosList::" << __func__ <<
" ERROR: list_keys_init returned " <<
cpp_strerror(-ret) << dendl;
return ret;
ret = do_incomplete_multipart(dpp, bucket.get());
if (ret < 0) {
- lderr(store->ctx()) << "RGWRadosList::" << __func__ <<
+ ldpp_dout(dpp, -1) << "RGWRadosList::" << __func__ <<
": ERROR: do_incomplete_multipart returned ret=" << ret << dendl;
return ret;
}
do { // while (is_parts_truncated);
std::map<uint32_t, RGWUploadPartInfo> parts;
- ret = list_multipart_parts(bucket, store->ctx(),
+ ret = list_multipart_parts(dpp, bucket, store->ctx(),
mp.get_upload_id(), mp.get_meta(),
max_parts, parts_marker,
parts, &parts_marker,
"for " << mp.get_upload_id() << ", moving on" << dendl;
break;
} else if (ret < 0) {
- lderr(store->ctx()) << "RGWRadosList::" << __func__ <<
+ ldpp_dout(dpp, -1) << "RGWRadosList::" << __func__ <<
": ERROR: list_multipart_parts returned ret=" << ret <<
dendl;
return ret;
for (auto& p : parts) {
RGWObjManifest& manifest = p.second.manifest;
- for (auto obj_it = manifest.obj_begin();
- obj_it != manifest.obj_end();
+ for (auto obj_it = manifest.obj_begin(dpp);
+ obj_it != manifest.obj_end(dpp);
++obj_it) {
const rgw_raw_obj& loc =
obj_it.get_location().get_raw_obj(store);
librados::IoCtx& get_ioctx() { return ioctx; }
- int init();
+ int init(const DoutPrefixProvider *dpp);
int read_job(const string& job_name, RGWOrphanSearchState& state);
int write_job(const string& job_name, const RGWOrphanSearchState& state);
int list_jobs(map<string,RGWOrphanSearchState> &job_list);
- int store_entries(const string& oid, const map<string, bufferlist>& entries);
+ int store_entries(const DoutPrefixProvider *dpp, const string& oid, const map<string, bufferlist>& entries);
int read_entries(const string& oid, const string& marker, map<string, bufferlist> *entries, bool *truncated);
};
list<string>::iterator end;
};
- int log_oids(map<int, string>& log_shards, map<int, list<string> >& oids);
+ int log_oids(const DoutPrefixProvider *dpp, map<int, string>& log_shards, map<int, list<string> >& oids);
#define RGW_ORPHANSEARCH_HASH_PRIME 7877
int orphan_shard(const string& str) {
return ceph_str_hash_linux(str.c_str(), str.size()) % RGW_ORPHANSEARCH_HASH_PRIME % search_info.num_shards;
}
- int handle_stat_result(map<int, list<string> >& oids, rgw::sal::Object::StatOp::Result& result);
- int pop_and_handle_stat_op(map<int, list<string> >& oids, std::deque<std::unique_ptr<rgw::sal::Object::StatOp>>& ops);
-
+ int handle_stat_result(const DoutPrefixProvider *dpp, map<int, list<string> >& oids, rgw::sal::Object::StatOp::Result& result);
+ int pop_and_handle_stat_op(const DoutPrefixProvider *dpp, map<int, list<string> >& oids, std::deque<std::unique_ptr<rgw::sal::Object::StatOp>>& ops);
int remove_index(map<int, string>& index);
public:
return orphan_store.write_job(search_info.job_name, state);
}
- int init(const string& job_name, RGWOrphanSearchInfo *info, bool _detailed_mode=false);
+ int init(const DoutPrefixProvider *dpp, const string& job_name, RGWOrphanSearchInfo *info, bool _detailed_mode=false);
int create(const string& job_name, int num_shards);
- int build_all_oids_index();
- int build_buckets_instance_index();
+ int build_all_oids_index(const DoutPrefixProvider *dpp);
+ int build_buckets_instance_index(const DoutPrefixProvider *dpp);
int build_linked_oids_for_bucket(const DoutPrefixProvider *dpp, const string& bucket_instance_id, map<int, list<string> >& oids);
int build_linked_oids_index(const DoutPrefixProvider *dpp);
- int compare_oid_indexes();
+ int compare_oid_indexes(const DoutPrefixProvider *dpp);
int run(const DoutPrefixProvider *dpp);
int finish();
bool include_rgw_obj_name;
std::string field_separator;
- int handle_stat_result(rgw::sal::Object::StatOp::Result& result,
+ int handle_stat_result(const DoutPrefixProvider *dpp,
+ rgw::sal::Object::StatOp::Result& result,
std::string& bucket_name,
rgw_obj_key& obj_key,
std::set<string>& obj_oids);
- int pop_and_handle_stat_op(RGWObjectCtx& obj_ctx,
+ int pop_and_handle_stat_op(const DoutPrefixProvider *dpp,
+ RGWObjectCtx& obj_ctx,
std::deque<std::unique_ptr<rgw::sal::Object::StatOp>>& ops);
public:
RGWSI_OTP_BE_Ctx be_ctx(op->ctx());
- int ret = svc.otp->store_all(be_ctx,
+ int ret = svc.otp->store_all(dpp, be_ctx,
entry,
obj->devices,
obj->mtime,
RGWSI_OTP_BE_Ctx be_ctx(op->ctx());
- return svc.otp->remove_all(be_ctx,
+ return svc.otp->remove_all(dpp, be_ctx,
entry,
&objv_tracker,
y);
});
}
-int RGWOTPCtl::store_all(const RGWOTPInfo& info,
+int RGWOTPCtl::store_all(const DoutPrefixProvider *dpp,
+ const RGWOTPInfo& info,
optional_yield y,
const PutParams& params)
{
return meta_handler->call([&](RGWSI_OTP_BE_Ctx& ctx) {
- return svc.otp->store_all(ctx, info.uid, info.devices, params.mtime, params.objv_tracker, y);
+ return svc.otp->store_all(dpp, ctx, info.uid, info.devices, params.mtime, params.objv_tracker, y);
});
}
-int RGWOTPCtl::remove_all(const rgw_user& uid,
+int RGWOTPCtl::remove_all(const DoutPrefixProvider *dpp,
+ const rgw_user& uid,
optional_yield y,
const RemoveParams& params)
{
return meta_handler->call([&](RGWSI_OTP_BE_Ctx& ctx) {
- return svc.otp->remove_all(ctx, uid, params.objv_tracker, y);
+ return svc.otp->remove_all(dpp, ctx, uid, params.objv_tracker, y);
});
}
int read_all(const rgw_user& uid, RGWOTPInfo *info, optional_yield y,
const DoutPrefixProvider *dpp,
const GetParams& params = {});
- int store_all(const RGWOTPInfo& info, optional_yield y,
+ int store_all(const DoutPrefixProvider *dpp,
+ const RGWOTPInfo& info, optional_yield y,
const PutParams& params = {});
- int remove_all(const rgw_user& user, optional_yield y,
+ int remove_all(const DoutPrefixProvider *dpp,
+ const rgw_user& user, optional_yield y,
const RemoveParams& params = {});
};
~Impl();
Cursor get_current() const { return current_cursor; }
- Cursor attach(RGWPeriod&& period, optional_yield y);
+ Cursor attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y);
Cursor insert(RGWPeriod&& period);
Cursor lookup(epoch_t realm_epoch);
histories.clear_and_dispose(std::default_delete<History>{});
}
-Cursor RGWPeriodHistory::Impl::attach(RGWPeriod&& period, optional_yield y)
+Cursor RGWPeriodHistory::Impl::attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y)
{
if (current_history == histories.end()) {
return Cursor{-EINVAL};
}
if (predecessor_id.empty()) {
- lderr(cct) << "reached a period with an empty predecessor id" << dendl;
+ ldpp_dout(dpp, -1) << "reached a period with an empty predecessor id" << dendl;
return Cursor{-EINVAL};
}
// pull the period outside of the lock
- int r = puller->pull(predecessor_id, period, y);
+ int r = puller->pull(dpp, predecessor_id, period, y);
if (r < 0) {
return Cursor{r};
}
{
return impl->get_current();
}
-Cursor RGWPeriodHistory::attach(RGWPeriod&& period, optional_yield y)
+Cursor RGWPeriodHistory::attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y)
{
- return impl->attach(std::move(period), y);
+ return impl->attach(dpp, std::move(period), y);
}
Cursor RGWPeriodHistory::insert(RGWPeriod&& period)
{
#include "include/ceph_assert.h"
#include "include/types.h"
#include "common/async/yield_context.h"
+#include "common/dout.h"
namespace bi = boost::intrusive;
public:
virtual ~Puller() = default;
- virtual int pull(const std::string& period_id, RGWPeriod& period,
+ virtual int pull(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period,
optional_yield y) = 0;
};
/// current_period and the given period, reading predecessor periods or
/// fetching them from the master as necessary. returns a cursor at the
/// given period that can be used to traverse the current_history
- Cursor attach(RGWPeriod&& period, optional_yield y);
+ Cursor attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y);
/// insert the given period into an existing history, or create a new
/// unconnected history. similar to attach(), but it doesn't try to fetch
namespace {
// pull the given period over the connection
-int pull_period(RGWRESTConn* conn, const std::string& period_id,
+int pull_period(const DoutPrefixProvider *dpp, RGWRESTConn* conn, const std::string& period_id,
const std::string& realm_id, RGWPeriod& period,
optional_yield y)
{
bufferlist data;
#define MAX_REST_RESPONSE (128 * 1024)
- int r = conn->forward(user, info, nullptr, MAX_REST_RESPONSE, nullptr, &data, y);
+ int r = conn->forward(dpp, user, info, nullptr, MAX_REST_RESPONSE, nullptr, &data, y);
if (r < 0) {
return r;
}
} // anonymous namespace
-int RGWPeriodPuller::pull(const std::string& period_id, RGWPeriod& period,
+int RGWPeriodPuller::pull(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period,
optional_yield y)
{
// try to read the period from rados
period.set_id(period_id);
period.set_epoch(0);
- int r = period.init(cct, svc.sysobj, y);
+ int r = period.init(dpp, cct, svc.sysobj, y);
if (r < 0) {
if (svc.zone->is_meta_master()) {
// can't pull if we're the master
- ldout(cct, 1) << "metadata master failed to read period "
+ ldpp_dout(dpp, 1) << "metadata master failed to read period "
<< period_id << " from local storage: " << cpp_strerror(r) << dendl;
return r;
}
- ldout(cct, 14) << "pulling period " << period_id
+ ldpp_dout(dpp, 14) << "pulling period " << period_id
<< " from master" << dendl;
// request the period from the master zone
- r = pull_period(svc.zone->get_master_conn(), period_id,
+ r = pull_period(dpp, svc.zone->get_master_conn(), period_id,
svc.zone->get_realm().get_id(), period, y);
if (r < 0) {
- lderr(cct) << "failed to pull period " << period_id << dendl;
+ ldpp_dout(dpp, -1) << "failed to pull period " << period_id << dendl;
return r;
}
// write the period to rados
- r = period.store_info(true, y);
+ r = period.store_info(dpp, true, y);
if (r == -EEXIST) {
r = 0;
} else if (r < 0) {
- lderr(cct) << "failed to store period " << period_id << dendl;
+ ldpp_dout(dpp, -1) << "failed to store period " << period_id << dendl;
return r;
}
// update latest epoch
- r = period.update_latest_epoch(period.get_epoch(), y);
+ r = period.update_latest_epoch(dpp, period.get_epoch(), y);
if (r == -EEXIST) {
// already have this epoch (or a more recent one)
return 0;
}
if (r < 0) {
- lderr(cct) << "failed to update latest_epoch for period "
+ ldpp_dout(dpp, -1) << "failed to update latest_epoch for period "
<< period_id << dendl;
return r;
}
// reflect period objects if this is the latest version
if (svc.zone->get_realm().get_current_period() == period_id) {
- r = period.reflect(y);
+ r = period.reflect(dpp, y);
if (r < 0) {
return r;
}
}
- ldout(cct, 14) << "period " << period_id
+ ldpp_dout(dpp, 14) << "period " << period_id
<< " pulled and written to local storage" << dendl;
} else {
- ldout(cct, 14) << "found period " << period_id
+ ldpp_dout(dpp, 14) << "found period " << period_id
<< " in local storage" << dendl;
}
return 0;
public:
explicit RGWPeriodPuller(RGWSI_Zone *zone_svc, RGWSI_SysObj *sysobj_svc);
- int pull(const std::string& period_id, RGWPeriod& period, optional_yield y) override;
+ int pull(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y) override;
};
#endif // CEPH_RGW_PERIOD_PULLER_H
counter(0)
{}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int PushAndRetryCR::operate()
+int PushAndRetryCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
for (;;) {
yield {
- ldout(cct, 10) << "pushing period " << period.get_id()
+ ldpp_dout(dpp, 10) << "pushing period " << period.get_id()
<< " to " << zone << dendl;
// initialize the http params
rgw_http_param_pair params[] = {
// stop on success
if (get_ret_status() == 0) {
- ldout(cct, 10) << "push to " << zone << " succeeded" << dendl;
+ ldpp_dout(dpp, 10) << "push to " << zone << " succeeded" << dendl;
return set_cr_done();
}
utime_t dur;
dur.set_from_double(timeout);
- ldout(cct, 10) << "waiting " << dur << "s for retry.." << dendl;
+ ldpp_dout(dpp, 10) << "waiting " << dur << "s for retry.." << dendl;
wait(dur);
timeout *= 2;
conns(std::move(conns))
{}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int PushAllCR::operate()
+int PushAllCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
// spawn a coroutine to push the period over each connection
yield {
- ldout(cct, 4) << "sending " << conns.size() << " periods" << dendl;
+ ldpp_dout(dpp, 4) << "sending " << conns.size() << " periods" << dendl;
for (auto& c : conns)
spawn(new PushAndRetryCR(cct, c.first, &c.second, http, period), false);
}
}
/// A background thread to run the PushAllCR coroutine and exit.
-class RGWPeriodPusher::CRThread {
+class RGWPeriodPusher::CRThread : public DoutPrefixProvider {
+ CephContext* cct;
RGWCoroutinesManager coroutines;
RGWHTTPManager http;
boost::intrusive_ptr<PushAllCR> push_all;
public:
CRThread(CephContext* cct, RGWPeriod&& period,
std::map<std::string, RGWRESTConn>&& conns)
- : coroutines(cct, NULL),
+ : cct(cct), coroutines(cct, NULL),
http(cct, coroutines.get_completion_mgr()),
push_all(new PushAllCR(cct, &http, std::move(period), std::move(conns)))
{
http.start();
// must spawn the CR thread after start
- thread = std::thread([this]() noexcept { coroutines.run(push_all.get()); });
+ thread = std::thread([this]() noexcept { coroutines.run(this, push_all.get()); });
}
~CRThread()
{
if (thread.joinable())
thread.join();
}
+
+ CephContext *get_cct() const override { return cct; }
+ unsigned get_subsys() const override { return dout_subsys; }
+ std::ostream& gen_prefix(std::ostream& out) const override { return out << "rgw period pusher CR thread: "; }
};
-RGWPeriodPusher::RGWPeriodPusher(rgw::sal::Store* store,
+RGWPeriodPusher::RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Store* store,
optional_yield y)
: cct(store->ctx()), store(store)
{
// always send out the current period on startup
RGWPeriod period;
// XXX dang
- int r = period.init(cct, static_cast<rgw::sal::RadosStore* >(store)->svc()->sysobj, realm_id, y, realm.get_name());
+ int r = period.init(dpp, cct, static_cast<rgw::sal::RadosStore* >(store)->svc()->sysobj, realm_id, y, realm.get_name());
if (r < 0) {
- lderr(cct) << "failed to load period for realm " << realm_id << dendl;
+ ldpp_dout(dpp, -1) << "failed to load period for realm " << realm_id << dendl;
return;
}
class RGWPeriodPusher final : public RGWRealmWatcher::Watcher,
public RGWRealmReloader::Pauser {
public:
- explicit RGWPeriodPusher(rgw::sal::Store* store, optional_yield y);
+ explicit RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y);
~RGWPeriodPusher() override;
/// respond to realm notifications by pushing new periods to other zones
void RGWProcess::RGWWQ::_process(RGWRequest *req, ThreadPool::TPHandle &) {
perfcounter->inc(l_rgw_qactive);
- process->handle_request(req);
+ process->handle_request(this, req);
process->req_throttle.put(1);
perfcounter->inc(l_rgw_qactive, -1);
}
int sock_fd;
std::string uri_prefix;
- struct RGWWQ : public ThreadPool::WorkQueue<RGWRequest> {
+ struct RGWWQ : public DoutPrefixProvider, public ThreadPool::WorkQueue<RGWRequest> {
RGWProcess* process;
RGWWQ(RGWProcess* p, ceph::timespan timeout, ceph::timespan suicide_timeout,
ThreadPool* tp)
void _clear() override {
ceph_assert(process->m_req_queue.empty());
}
+
+ CephContext *get_cct() const override { return process->cct; }
+ unsigned get_subsys() const override { return ceph_subsys_rgw; }
+ std::ostream& gen_prefix(std::ostream& out) const override { return out << "rgw request work queue: "; }
+
} req_wq;
public:
virtual ~RGWProcess() = default;
virtual void run() = 0;
- virtual void handle_request(RGWRequest *req) = 0;
+ virtual void handle_request(const DoutPrefixProvider *dpp, RGWRequest *req) = 0;
void pause() {
m_tp.pause();
}
void run() override;
- void handle_request(RGWRequest* req) override;
+ void handle_request(const DoutPrefixProvider *dpp, RGWRequest* req) override;
};
class RGWProcessControlThread : public Thread {
RGWProcess(cct, pe, num_threads, _conf) {}
void run() override;
void checkpoint();
- void handle_request(RGWRequest* req) override;
+ void handle_request(const DoutPrefixProvider *dpp, RGWRequest* req) override;
void gen_request(const string& method, const string& resource,
int content_length, std::atomic<bool>* fail_flag);
get_meta_obj(&meta_obj);
}
-int RGWPubSub::remove(const rgw_raw_obj& obj,
+int RGWPubSub::remove(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj,
RGWObjVersionTracker *objv_tracker,
optional_yield y)
{
- int ret = rgw_delete_system_obj(store->svc()->sysobj, obj.pool, obj.oid, objv_tracker, y);
+ int ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, obj.pool, obj.oid, objv_tracker, y);
if (ret < 0) {
return ret;
}
return 0;
}
-int RGWPubSub::write_topics(const rgw_pubsub_topics& topics,
+int RGWPubSub::write_topics(const DoutPrefixProvider *dpp, const rgw_pubsub_topics& topics,
RGWObjVersionTracker *objv_tracker, optional_yield y)
{
- int ret = write(meta_obj, topics, objv_tracker, y);
+ int ret = write(dpp, meta_obj, topics, objv_tracker, y);
if (ret < 0 && ret != -ENOENT) {
- ldout(store->ctx(), 1) << "ERROR: failed to write topics info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to write topics info: ret=" << ret << dendl;
return ret;
}
return 0;
return 0;
}
-int RGWPubSub::Bucket::write_topics(const rgw_pubsub_bucket_topics& topics,
+int RGWPubSub::Bucket::write_topics(const DoutPrefixProvider *dpp, const rgw_pubsub_bucket_topics& topics,
RGWObjVersionTracker *objv_tracker,
optional_yield y)
{
- int ret = ps->write(bucket_meta_obj, topics, objv_tracker, y);
+ int ret = ps->write(dpp, bucket_meta_obj, topics, objv_tracker, y);
if (ret < 0) {
- ldout(ps->store->ctx(), 1) << "ERROR: failed to write bucket topics info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to write bucket topics info: ret=" << ret << dendl;
return ret;
return 0;
}
-int RGWPubSub::Bucket::create_notification(const string& topic_name, const rgw::notify::EventTypeList& events, optional_yield y) {
- return create_notification(topic_name, events, std::nullopt, "", y);
+int RGWPubSub::Bucket::create_notification(const DoutPrefixProvider *dpp, const string& topic_name, const rgw::notify::EventTypeList& events, optional_yield y) {
+ return create_notification(dpp, topic_name, events, std::nullopt, "", y);
}
-int RGWPubSub::Bucket::create_notification(const string& topic_name,const rgw::notify::EventTypeList& events, OptionalFilter s3_filter, const std::string& notif_name, optional_yield y) {
+int RGWPubSub::Bucket::create_notification(const DoutPrefixProvider *dpp, const string& topic_name,const rgw::notify::EventTypeList& events, OptionalFilter s3_filter, const std::string& notif_name, optional_yield y) {
rgw_pubsub_topic_subs topic_info;
- rgw::sal::RadosStore* store = ps->store;
int ret = ps->get_topic(topic_name, &topic_info);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to read topic '" << topic_name << "' info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to read topic '" << topic_name << "' info: ret=" << ret << dendl;
return ret;
}
- ldout(store->ctx(), 20) << "successfully read topic '" << topic_name << "' info" << dendl;
+ ldpp_dout(dpp, 20) << "successfully read topic '" << topic_name << "' info" << dendl;
RGWObjVersionTracker objv_tracker;
rgw_pubsub_bucket_topics bucket_topics;
ret = read_topics(&bucket_topics, &objv_tracker);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to read topics from bucket '" <<
+ ldpp_dout(dpp, 1) << "ERROR: failed to read topics from bucket '" <<
bucket.name << "': ret=" << ret << dendl;
return ret;
}
- ldout(store->ctx(), 20) << "successfully read " << bucket_topics.topics.size() << " topics from bucket '" <<
+ ldpp_dout(dpp, 20) << "successfully read " << bucket_topics.topics.size() << " topics from bucket '" <<
bucket.name << "'" << dendl;
auto& topic_filter = bucket_topics.topics[topic_name];
topic_filter.s3_filter = *s3_filter;
}
- ret = write_topics(bucket_topics, &objv_tracker, y);
+ ret = write_topics(dpp, bucket_topics, &objv_tracker, y);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to write topics to bucket '" << bucket.name << "': ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to write topics to bucket '" << bucket.name << "': ret=" << ret << dendl;
return ret;
}
- ldout(store->ctx(), 20) << "successfully wrote " << bucket_topics.topics.size() << " topics to bucket '" << bucket.name << "'" << dendl;
+ ldpp_dout(dpp, 20) << "successfully wrote " << bucket_topics.topics.size() << " topics to bucket '" << bucket.name << "'" << dendl;
return 0;
}
-int RGWPubSub::Bucket::remove_notification(const string& topic_name, optional_yield y)
+int RGWPubSub::Bucket::remove_notification(const DoutPrefixProvider *dpp, const string& topic_name, optional_yield y)
{
rgw_pubsub_topic_subs topic_info;
- rgw::sal::RadosStore* store = ps->store;
int ret = ps->get_topic(topic_name, &topic_info);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to read topic info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to read topic info: ret=" << ret << dendl;
return ret;
}
ret = read_topics(&bucket_topics, &objv_tracker);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to read bucket topics info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to read bucket topics info: ret=" << ret << dendl;
return ret;
}
if (bucket_topics.topics.empty()) {
// no more topics - delete the notification object of the bucket
- ret = ps->remove(bucket_meta_obj, &objv_tracker, y);
+ ret = ps->remove(dpp, bucket_meta_obj, &objv_tracker, y);
if (ret < 0 && ret != -ENOENT) {
- ldout(ps->store->ctx(), 1) << "ERROR: failed to remove bucket topics: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to remove bucket topics: ret=" << ret << dendl;
return ret;
}
return 0;
}
// write back the notifications without the deleted one
- ret = write_topics(bucket_topics, &objv_tracker, y);
+ ret = write_topics(dpp, bucket_topics, &objv_tracker, y);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to write topics info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to write topics info: ret=" << ret << dendl;
return ret;
}
return 0;
}
-int RGWPubSub::Bucket::remove_notifications(optional_yield y)
+int RGWPubSub::Bucket::remove_notifications(const DoutPrefixProvider *dpp, optional_yield y)
{
// get all topics on a bucket
rgw_pubsub_bucket_topics bucket_topics;
auto ret = get_topics(&bucket_topics);
if (ret < 0 && ret != -ENOENT) {
- ldout(ps->store->ctx(), 1) << "ERROR: failed to get list of topics from bucket '" << bucket.name << "', ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to get list of topics from bucket '" << bucket.name << "', ret=" << ret << dendl;
return ret ;
}
// remove all auto-genrated topics
for (const auto& topic : bucket_topics.topics) {
const auto& topic_name = topic.first;
- ret = ps->remove_topic(topic_name, y);
+ ret = ps->remove_topic(dpp, topic_name, y);
if (ret < 0 && ret != -ENOENT) {
- ldout(ps->store->ctx(), 5) << "WARNING: failed to remove auto-generated topic '" << topic_name << "', ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << "WARNING: failed to remove auto-generated topic '" << topic_name << "', ret=" << ret << dendl;
}
}
// delete the notification object of the bucket
- ret = ps->remove(bucket_meta_obj, nullptr, y);
+ ret = ps->remove(dpp, bucket_meta_obj, nullptr, y);
if (ret < 0 && ret != -ENOENT) {
- ldout(ps->store->ctx(), 1) << "ERROR: failed to remove bucket topics: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to remove bucket topics: ret=" << ret << dendl;
return ret;
}
return 0;
}
-int RGWPubSub::create_topic(const string& name, optional_yield y) {
- return create_topic(name, rgw_pubsub_sub_dest(), "", "", y);
+int RGWPubSub::create_topic(const DoutPrefixProvider *dpp, const string& name, optional_yield y) {
+ return create_topic(dpp, name, rgw_pubsub_sub_dest(), "", "", y);
}
-int RGWPubSub::create_topic(const string& name, const rgw_pubsub_sub_dest& dest, const std::string& arn, const std::string& opaque_data, optional_yield y) {
+int RGWPubSub::create_topic(const DoutPrefixProvider *dpp, const string& name, const rgw_pubsub_sub_dest& dest, const std::string& arn, const std::string& opaque_data, optional_yield y) {
RGWObjVersionTracker objv_tracker;
rgw_pubsub_topics topics;
int ret = read_topics(&topics, &objv_tracker);
if (ret < 0 && ret != -ENOENT) {
// it's not an error if no topics exist, we create one
- ldout(store->ctx(), 1) << "ERROR: failed to read topics info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to read topics info: ret=" << ret << dendl;
return ret;
}
new_topic.topic.arn = arn;
new_topic.topic.opaque_data = opaque_data;
- ret = write_topics(topics, &objv_tracker, y);
+ ret = write_topics(dpp, topics, &objv_tracker, y);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to write topics info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to write topics info: ret=" << ret << dendl;
return ret;
}
return 0;
}
-int RGWPubSub::remove_topic(const string& name, optional_yield y)
+int RGWPubSub::remove_topic(const DoutPrefixProvider *dpp, const string& name, optional_yield y)
{
RGWObjVersionTracker objv_tracker;
rgw_pubsub_topics topics;
int ret = read_topics(&topics, &objv_tracker);
if (ret < 0 && ret != -ENOENT) {
- ldout(store->ctx(), 1) << "ERROR: failed to read topics info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to read topics info: ret=" << ret << dendl;
return ret;
} else if (ret == -ENOENT) {
// it's not an error if no topics exist, just a no-op
- ldout(store->ctx(), 10) << "WARNING: failed to read topics info, deletion is a no-op: ret=" << ret << dendl;
+ ldpp_dout(dpp, 10) << "WARNING: failed to read topics info, deletion is a no-op: ret=" << ret << dendl;
return 0;
}
topics.topics.erase(name);
- ret = write_topics(topics, &objv_tracker, y);
+ ret = write_topics(dpp, topics, &objv_tracker, y);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to remove topics info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to remove topics info: ret=" << ret << dendl;
return ret;
}
return 0;
}
-int RGWPubSub::Sub::write_sub(const rgw_pubsub_sub_config& sub_conf,
+int RGWPubSub::Sub::write_sub(const DoutPrefixProvider *dpp,
+ const rgw_pubsub_sub_config& sub_conf,
RGWObjVersionTracker *objv_tracker,
optional_yield y)
{
- int ret = ps->write(sub_meta_obj, sub_conf, objv_tracker, y);
+ int ret = ps->write(dpp, sub_meta_obj, sub_conf, objv_tracker, y);
if (ret < 0) {
- ldout(ps->store->ctx(), 1) << "ERROR: failed to write subscription info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to write subscription info: ret=" << ret << dendl;
return ret;
}
return 0;
}
-int RGWPubSub::Sub::remove_sub(RGWObjVersionTracker *objv_tracker,
+int RGWPubSub::Sub::remove_sub(const DoutPrefixProvider *dpp, RGWObjVersionTracker *objv_tracker,
optional_yield y)
{
- int ret = ps->remove(sub_meta_obj, objv_tracker, y);
+ int ret = ps->remove(dpp, sub_meta_obj, objv_tracker, y);
if (ret < 0) {
- ldout(ps->store->ctx(), 1) << "ERROR: failed to remove subscription info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to remove subscription info: ret=" << ret << dendl;
return ret;
}
return read_sub(result, nullptr);
}
-int RGWPubSub::Sub::subscribe(const string& topic, const rgw_pubsub_sub_dest& dest, optional_yield y, const std::string& s3_id)
+int RGWPubSub::Sub::subscribe(const DoutPrefixProvider *dpp, const string& topic, const rgw_pubsub_sub_dest& dest, optional_yield y, const std::string& s3_id)
{
RGWObjVersionTracker objv_tracker;
rgw_pubsub_topics topics;
- rgw::sal::RadosStore* store = ps->store;
int ret = ps->read_topics(&topics, &objv_tracker);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to read topics info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to read topics info: ret=" << ret << dendl;
return ret != -ENOENT ? ret : -EINVAL;
}
auto iter = topics.topics.find(topic);
if (iter == topics.topics.end()) {
- ldout(store->ctx(), 1) << "ERROR: cannot add subscription to topic: topic not found" << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: cannot add subscription to topic: topic not found" << dendl;
return -EINVAL;
}
t.subs.insert(sub);
- ret = ps->write_topics(topics, &objv_tracker, y);
+ ret = ps->write_topics(dpp, topics, &objv_tracker, y);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to write topics info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to write topics info: ret=" << ret << dendl;
return ret;
}
- ret = write_sub(sub_conf, nullptr, y);
+ ret = write_sub(dpp, sub_conf, nullptr, y);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to write subscription info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to write subscription info: ret=" << ret << dendl;
return ret;
}
return 0;
}
-int RGWPubSub::Sub::unsubscribe(const string& _topic, optional_yield y)
+int RGWPubSub::Sub::unsubscribe(const DoutPrefixProvider *dpp, const string& _topic, optional_yield y)
{
string topic = _topic;
RGWObjVersionTracker sobjv_tracker;
- rgw::sal::RadosStore* store = ps->store;
if (topic.empty()) {
rgw_pubsub_sub_config sub_conf;
int ret = read_sub(&sub_conf, &sobjv_tracker);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to read subscription info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to read subscription info: ret=" << ret << dendl;
return ret;
}
topic = sub_conf.topic;
int ret = ps->read_topics(&topics, &objv_tracker);
if (ret < 0) {
// not an error - could be that topic was already deleted
- ldout(store->ctx(), 10) << "WARNING: failed to read topics info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 10) << "WARNING: failed to read topics info: ret=" << ret << dendl;
} else {
auto iter = topics.topics.find(topic);
if (iter != topics.topics.end()) {
t.subs.erase(sub);
- ret = ps->write_topics(topics, &objv_tracker, y);
+ ret = ps->write_topics(dpp, topics, &objv_tracker, y);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to write topics info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to write topics info: ret=" << ret << dendl;
return ret;
}
}
}
- ret = remove_sub(&sobjv_tracker, y);
+ ret = remove_sub(dpp, &sobjv_tracker, y);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to delete subscription info: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to delete subscription info: ret=" << ret << dendl;
return ret;
}
return 0;
int read(const rgw_raw_obj& obj, T* data, RGWObjVersionTracker* objv_tracker);
template <class T>
- int write(const rgw_raw_obj& obj, const T& info,
+ int write(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const T& info,
RGWObjVersionTracker* obj_tracker, optional_yield y);
- int remove(const rgw_raw_obj& obj, RGWObjVersionTracker* objv_tracker,
+ int remove(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, RGWObjVersionTracker* objv_tracker,
optional_yield y);
int read_topics(rgw_pubsub_topics *result, RGWObjVersionTracker* objv_tracker);
- int write_topics(const rgw_pubsub_topics& topics,
+ int write_topics(const DoutPrefixProvider *dpp, const rgw_pubsub_topics& topics,
RGWObjVersionTracker* objv_tracker, optional_yield y);
public:
// set the list of topics associated with a bucket
// use version tracker to enforce atomicity between read/write
// return 0 on success, error code otherwise
- int write_topics(const rgw_pubsub_bucket_topics& topics,
+ int write_topics(const DoutPrefixProvider *dpp, const rgw_pubsub_bucket_topics& topics,
RGWObjVersionTracker* objv_tracker, optional_yield y);
public:
Bucket(RGWPubSub *_ps, const rgw_bucket& _bucket) : ps(_ps), bucket(_bucket) {
// for S3 compliant notifications the version with: s3_filter and notif_name should be used
// return -ENOENT if the topic does not exist
// return 0 on success, error code otherwise
- int create_notification(const string& topic_name, const rgw::notify::EventTypeList& events, optional_yield y);
- int create_notification(const string& topic_name, const rgw::notify::EventTypeList& events, OptionalFilter s3_filter, const std::string& notif_name, optional_yield y);
+ int create_notification(const DoutPrefixProvider *dpp, const string& topic_name, const rgw::notify::EventTypeList& events, optional_yield y);
+ int create_notification(const DoutPrefixProvider *dpp, const string& topic_name, const rgw::notify::EventTypeList& events, OptionalFilter s3_filter, const std::string& notif_name, optional_yield y);
// remove a topic and filter from bucket
// if the topic does not exist on the bucket it is a no-op (considered success)
// return -ENOENT if the topic does not exist
// return 0 on success, error code otherwise
- int remove_notification(const string& topic_name, optional_yield y);
+ int remove_notification(const DoutPrefixProvider *dpp, const string& topic_name, optional_yield y);
// remove all notifications (and autogenerated topics) associated with the bucket
// return 0 on success or if no topic was associated with the bucket, error code otherwise
- int remove_notifications(optional_yield y);
+ int remove_notifications(const DoutPrefixProvider *dpp, optional_yield y);
};
// base class for subscription
rgw_raw_obj sub_meta_obj;
int read_sub(rgw_pubsub_sub_config *result, RGWObjVersionTracker* objv_tracker);
- int write_sub(const rgw_pubsub_sub_config& sub_conf,
+ int write_sub(const DoutPrefixProvider *dpp, const rgw_pubsub_sub_config& sub_conf,
RGWObjVersionTracker* objv_tracker, optional_yield y);
- int remove_sub(RGWObjVersionTracker* objv_tracker, optional_yield y);
+ int remove_sub(const DoutPrefixProvider *dpp, RGWObjVersionTracker* objv_tracker, optional_yield y);
public:
Sub(RGWPubSub *_ps, const std::string& _sub) : ps(_ps), sub(_sub) {
ps->get_sub_meta_obj(sub, &sub_meta_obj);
virtual ~Sub() = default;
- int subscribe(const string& topic_name, const rgw_pubsub_sub_dest& dest, optional_yield y,
+ int subscribe(const DoutPrefixProvider *dpp, const string& topic_name, const rgw_pubsub_sub_dest& dest, optional_yield y,
const std::string& s3_id="");
- int unsubscribe(const string& topic_name, optional_yield y);
+ int unsubscribe(const DoutPrefixProvider *dpp, const string& topic_name, optional_yield y);
int get_conf(rgw_pubsub_sub_config* result);
static const int DEFAULT_MAX_EVENTS = 100;
// create a topic with a name only
// if the topic already exists it is a no-op (considered success)
// return 0 on success, error code otherwise
- int create_topic(const string& name, optional_yield y);
+ int create_topic(const DoutPrefixProvider *dpp, const string& name, optional_yield y);
// create a topic with push destination information and ARN
// if the topic already exists the destination and ARN values may be updated (considered success)
// return 0 on success, error code otherwise
- int create_topic(const string& name, const rgw_pubsub_sub_dest& dest, const std::string& arn, const std::string& opaque_data, optional_yield y);
+ int create_topic(const DoutPrefixProvider *dpp, const string& name, const rgw_pubsub_sub_dest& dest, const std::string& arn, const std::string& opaque_data, optional_yield y);
// remove a topic according to its name
// if the topic does not exist it is a no-op (considered success)
// return 0 on success, error code otherwise
- int remove_topic(const string& name, optional_yield y);
+ int remove_topic(const DoutPrefixProvider *dpp, const string& name, optional_yield y);
};
}
template <class T>
-int RGWPubSub::write(const rgw_raw_obj& obj, const T& info,
+int RGWPubSub::write(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const T& info,
RGWObjVersionTracker* objv_tracker, optional_yield y)
{
bufferlist bl;
encode(info, bl);
- int ret = rgw_put_system_obj(obj_ctx, obj.pool, obj.oid,
+ int ret = rgw_put_system_obj(dpp, obj_ctx, obj.pool, obj.oid,
bl, false, objv_tracker,
real_time(), y);
if (ret < 0) {
}
// send message to endpoint
- int send_request() override {
+ int send_request(const DoutPrefixProvider *dpp) override {
init_new_io(this);
const auto rc = sync_env->http_manager->add_request(this);
if (rc < 0) {
topic(_topic), conn(_conn), message(_message) {}
// send message to endpoint, without waiting for reply
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
const auto rc = amqp::publish(conn, topic, message);
if (rc < 0) {
topic(_topic), conn(_conn), message(_message) {}
// send message to endpoint, waiting for reply
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield {
init_new_io(this);
topic(_topic), conn(_conn), message(_message) {}
// send message to endpoint, without waiting for reply
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
const auto rc = kafka::publish(conn, topic, message);
if (rc < 0) {
topic(_topic), conn(_conn), message(_message) {}
// send message to endpoint, waiting for reply
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield {
init_new_io(this);
bucket->get_object(rgw_obj_key(mp.get_meta(), std::string(), RGW_OBJ_NS_MULTIPART));
meta_obj->set_in_extra_data(true);
- r = meta_obj->omap_set_val_by_key(p, bl, true, null_yield);
+ r = meta_obj->omap_set_val_by_key(dpp, p, bl, true, null_yield);
if (r < 0) {
return r == -ENOENT ? -ERR_NO_SUCH_UPLOAD : r;
}
//For Append obj, disable versioning
obj_op->params.versioning_disabled = true;
if (cur_manifest) {
- cur_manifest->append(manifest, store->get_zone());
+ cur_manifest->append(dpp, manifest, store->get_zone());
obj_op->params.manifest = cur_manifest;
} else {
obj_op->params.manifest = &manifest;
if (qs.async_refresh_time.sec() > 0 && now >= qs.async_refresh_time) {
int r = async_refresh(user, bucket, qs);
if (r < 0) {
- ldout(store->ctx(), 0) << "ERROR: quota async refresh returned ret=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: quota async refresh returned ret=" << r << dendl;
/* continue processing, might be a transient error, async refresh is just optimization */
}
const DoutPrefix dp(store->ctx(), dout_subsys, "rgw bucket async refresh handler: ");
int r = store->get_bucket(&dp, nullptr, bucket, &rbucket, null_yield);
if (r < 0) {
- ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl;
+ ldpp_dout(&dp, 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl;
return r;
}
- ldout(store->ctx(), 20) << "initiating async quota refresh for bucket=" << bucket << dendl;
+ ldpp_dout(&dp, 20) << "initiating async quota refresh for bucket=" << bucket << dendl;
- r = rbucket->get_bucket_stats_async(RGW_NO_SHARD, this);
+ r = rbucket->get_bucket_stats_async(&dp, RGW_NO_SHARD, this);
if (r < 0) {
- ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket.name << dendl;
+ ldpp_dout(&dp, 0) << "could not get bucket info for bucket=" << bucket.name << dendl;
/* get_bucket_stats_async() dropped our reference already */
return r;
int r = store->get_bucket(dpp, user.get(), _b, &bucket, y);
if (r < 0) {
- ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << _b << " r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "could not get bucket info for bucket=" << _b << " r=" << r << dendl;
return r;
}
string master_ver;
map<RGWObjCategory, RGWStorageStats> bucket_stats;
- r = bucket->get_bucket_stats(RGW_NO_SHARD, &bucket_ver, &master_ver, bucket_stats);
+ r = bucket->get_bucket_stats(dpp, RGW_NO_SHARD, &bucket_ver, &master_ver, bucket_stats);
if (r < 0) {
- ldout(store->ctx(), 0) << "could not get bucket stats for bucket="
+ ldpp_dout(dpp, 0) << "could not get bucket stats for bucket="
<< _b.name << dendl;
return r;
}
class UserAsyncRefreshHandler : public RGWQuotaCache<rgw_user>::AsyncRefreshHandler,
public RGWGetUserStats_CB {
+ const DoutPrefixProvider *dpp;
rgw_bucket bucket;
public:
- UserAsyncRefreshHandler(rgw::sal::Store* _store, RGWQuotaCache<rgw_user> *_cache,
+ UserAsyncRefreshHandler(const DoutPrefixProvider *_dpp, rgw::sal::Store* _store, RGWQuotaCache<rgw_user> *_cache,
const rgw_user& _user, const rgw_bucket& _bucket) :
RGWQuotaCache<rgw_user>::AsyncRefreshHandler(_store, _cache),
RGWGetUserStats_CB(_user),
+ dpp(_dpp),
bucket(_bucket) {}
void drop_reference() override { put(); }
{
std::unique_ptr<rgw::sal::User> ruser = store->get_user(user);
- ldout(store->ctx(), 20) << "initiating async quota refresh for user=" << user << dendl;
- int r = ruser->read_stats_async(this);
+ ldpp_dout(dpp, 20) << "initiating async quota refresh for user=" << user << dendl;
+ int r = ruser->read_stats_async(dpp, this);
if (r < 0) {
- ldout(store->ctx(), 0) << "could not get bucket info for user=" << user << dendl;
+ ldpp_dout(dpp, 0) << "could not get bucket info for user=" << user << dendl;
/* get_bucket_stats_async() dropped our reference already */
return r;
}
class RGWUserStatsCache : public RGWQuotaCache<rgw_user> {
+ const DoutPrefixProvider *dpp;
std::atomic<bool> down_flag = { false };
ceph::shared_mutex mutex = ceph::make_shared_mutex("RGWUserStatsCache");
map<rgw_bucket, rgw_user> modified_buckets;
}
public:
- RGWUserStatsCache(rgw::sal::Store* _store, bool quota_threads)
- : RGWQuotaCache<rgw_user>(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size)
+ RGWUserStatsCache(const DoutPrefixProvider *dpp, rgw::sal::Store* _store, bool quota_threads)
+ : RGWQuotaCache<rgw_user>(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size), dpp(dpp)
{
if (quota_threads) {
buckets_sync_thread = new BucketsSyncThread(store->ctx(), this);
}
AsyncRefreshHandler *allocate_refresh_handler(const rgw_user& user, const rgw_bucket& bucket) override {
- return new UserAsyncRefreshHandler(store, this, user, bucket);
+ return new UserAsyncRefreshHandler(dpp, store, this, user, bucket);
}
bool can_use_cached_stats(RGWQuotaInfo& quota, RGWStorageStats& stats) override {
const DoutPrefixProvider *dpp)
{
std::unique_ptr<rgw::sal::User> user = store->get_user(_u);
- int r = user->read_stats(y, &stats);
+ int r = user->read_stats(dpp, y, &stats);
if (r < 0) {
- ldout(store->ctx(), 0) << "could not get user stats for user=" << user << dendl;
+ ldpp_dout(dpp, 0) << "could not get user stats for user=" << user << dendl;
return r;
}
int r = store->get_bucket(dpp, user.get(), _b, &bucket, y);
if (r < 0) {
- ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << _b << " r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "could not get bucket info for bucket=" << _b << " r=" << r << dendl;
return r;
}
- r = bucket->sync_user_stats(y);
+ r = bucket->sync_user_stats(dpp, y);
if (r < 0) {
- ldout(store->ctx(), 0) << "ERROR: sync_user_stats() for user=" << _u << ", bucket=" << bucket << " returned " << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: sync_user_stats() for user=" << _u << ", bucket=" << bucket << " returned " << r << dendl;
return r;
}
ceph::real_time last_stats_update;
std::unique_ptr<rgw::sal::User> user = store->get_user(rgw_user(_u.to_str()));
- int ret = user->read_stats(y, &stats, &last_stats_sync, &last_stats_update);
+ int ret = user->read_stats(dpp, y, &stats, &last_stats_sync, &last_stats_update);
if (ret < 0) {
- ldout(store->ctx(), 5) << "ERROR: can't read user header: ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << "ERROR: can't read user header: ret=" << ret << dendl;
return ret;
}
if (!store->ctx()->_conf->rgw_user_quota_sync_idle_users &&
last_stats_update < last_stats_sync) {
- ldout(store->ctx(), 20) << "user is idle, not doing a full sync (user=" << user << ")" << dendl;
+ ldpp_dout(dpp, 20) << "user is idle, not doing a full sync (user=" << user << ")" << dendl;
return 0;
}
ret = rgw_user_sync_all_stats(dpp, store, user.get(), y);
if (ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: failed user stats sync, ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed user stats sync, ret=" << ret << dendl;
return ret;
}
string key = "user";
void *handle;
- int ret = store->meta_list_keys_init(key, string(), &handle);
+ int ret = store->meta_list_keys_init(dpp, key, string(), &handle);
if (ret < 0) {
- ldout(store->ctx(), 10) << "ERROR: can't get key: ret=" << ret << dendl;
+ ldpp_dout(dpp, 10) << "ERROR: can't get key: ret=" << ret << dendl;
return ret;
}
list<string> keys;
ret = store->meta_list_keys_next(handle, max, keys, &truncated);
if (ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: lists_keys_next(): ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: lists_keys_next(): ret=" << ret << dendl;
goto done;
}
for (list<string>::iterator iter = keys.begin();
iter != keys.end() && !going_down();
++iter) {
rgw_user user(*iter);
- ldout(store->ctx(), 20) << "RGWUserStatsCache: sync user=" << user << dendl;
+ ldpp_dout(dpp, 20) << "RGWUserStatsCache: sync user=" << user << dendl;
int ret = sync_user(dpp, user, y);
if (ret < 0) {
- ldout(store->ctx(), 5) << "ERROR: sync_user() failed, user=" << user << " ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << "ERROR: sync_user() failed, user=" << user << " ret=" << ret << dendl;
/* continuing to next user */
continue;
return 0;
}
public:
- RGWQuotaHandlerImpl(rgw::sal::Store* _store, bool quota_threads) : store(_store),
+ RGWQuotaHandlerImpl(const DoutPrefixProvider *dpp, rgw::sal::Store* _store, bool quota_threads) : store(_store),
bucket_stats_cache(_store),
- user_stats_cache(_store, quota_threads) {}
+ user_stats_cache(dpp, _store, quota_threads) {}
int check_quota(const rgw_user& user,
rgw_bucket& bucket,
};
-RGWQuotaHandler *RGWQuotaHandler::generate_handler(rgw::sal::Store* store, bool quota_threads)
+RGWQuotaHandler *RGWQuotaHandler::generate_handler(const DoutPrefixProvider *dpp, rgw::sal::Store* store, bool quota_threads)
{
- return new RGWQuotaHandlerImpl(store, quota_threads);
+ return new RGWQuotaHandlerImpl(dpp, store, quota_threads);
}
void RGWQuotaHandler::free_handler(RGWQuotaHandler *handler)
virtual void update_stats(const rgw_user& bucket_owner, rgw_bucket& bucket, int obj_delta, uint64_t added_bytes, uint64_t removed_bytes) = 0;
- static RGWQuotaHandler *generate_handler(rgw::sal::Store* store, bool quota_threads);
+ static RGWQuotaHandler *generate_handler(const DoutPrefixProvider *dpp, rgw::sal::Store* store, bool quota_threads);
static void free_handler(RGWQuotaHandler *handler);
};
http_manager.start();
}
- int notify_all(map<rgw_zone_id, RGWRESTConn *>& conn_map, set<int>& shards) {
+ int notify_all(const DoutPrefixProvider *dpp, map<rgw_zone_id, RGWRESTConn *>& conn_map, set<int>& shards) {
rgw_http_param_pair pairs[] = { { "type", "metadata" },
{ "notify", NULL },
{ NULL, NULL } };
stacks.push_back(stack);
}
- return run(stacks);
+ return run(dpp, stacks);
}
};
http_manager.start();
}
- int notify_all(map<rgw_zone_id, RGWRESTConn *>& conn_map,
+ int notify_all(const DoutPrefixProvider *dpp, map<rgw_zone_id, RGWRESTConn *>& conn_map,
bc::flat_map<int, bc::flat_set<string> >& shards) {
rgw_http_param_pair pairs[] = { { "type", "data" },
{ "notify", NULL },
stacks.push_back(stack);
}
- return run(stacks);
+ return run(dpp, stacks);
}
};
do {
auto start = ceph::real_clock::now();
- int r = processor->process();
+ int r = processor->process(this);
if (r < 0) {
- dout(0) << "ERROR: processor->process() returned error r=" << r << dendl;
+ ldpp_dout(this, 0) << "ERROR: processor->process() returned error r=" << r << dendl;
}
if (processor->going_down())
RGWMetaNotifier(RGWRados *_store, RGWMetadataLog* log)
: RGWRadosThread(_store, "meta-notifier"), notify_mgr(_store), log(log) {}
- int process() override;
+ int process(const DoutPrefixProvider *dpp) override;
};
-int RGWMetaNotifier::process()
+int RGWMetaNotifier::process(const DoutPrefixProvider *dpp)
{
set<int> shards;
}
for (set<int>::iterator iter = shards.begin(); iter != shards.end(); ++iter) {
- ldout(cct, 20) << __func__ << "(): notifying mdlog change, shard_id=" << *iter << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): notifying mdlog change, shard_id=" << *iter << dendl;
}
- notify_mgr.notify_all(store->svc.zone->get_zone_conn_map(), shards);
+ notify_mgr.notify_all(dpp, store->svc.zone->get_zone_conn_map(), shards);
return 0;
}
public:
RGWDataNotifier(RGWRados *_store) : RGWRadosThread(_store, "data-notifier"), notify_mgr(_store) {}
- int process() override;
+ int process(const DoutPrefixProvider *dpp) override;
};
-int RGWDataNotifier::process()
+int RGWDataNotifier::process(const DoutPrefixProvider *dpp)
{
auto data_log = store->svc.datalog_rados;
if (!data_log) {
}
for (const auto& [shard_id, keys] : shards) {
- ldout(cct, 20) << __func__ << "(): notifying datalog change, shard_id="
+ ldpp_dout(dpp, 20) << __func__ << "(): notifying datalog change, shard_id="
<< shard_id << ": " << keys << dendl;
}
- notify_mgr.notify_all(store->svc.zone->get_zone_data_notify_to_map(), shards);
+ notify_mgr.notify_all(dpp, store->svc.zone->get_zone_data_notify_to_map(), shards);
return 0;
}
RGWSyncProcessorThread(RGWRados *_store, const string& thread_name = "radosgw") : RGWRadosThread(_store, thread_name) {}
RGWSyncProcessorThread(RGWRados *_store) : RGWRadosThread(_store) {}
~RGWSyncProcessorThread() override {}
- int init() override = 0 ;
- int process() override = 0;
+ int init(const DoutPrefixProvider *dpp) override = 0 ;
+ int process(const DoutPrefixProvider *dpp) override = 0;
};
class RGWMetaSyncProcessorThread : public RGWSyncProcessorThread
}
RGWMetaSyncStatusManager* get_manager() { return &sync; }
- int init() override {
- int ret = sync.init();
+ int init(const DoutPrefixProvider *dpp) override {
+ int ret = sync.init(dpp);
if (ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: sync.init() returned " << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: sync.init() returned " << ret << dendl;
return ret;
}
return 0;
}
- int process() override {
- sync.run(null_yield);
+ int process(const DoutPrefixProvider *dpp) override {
+ sync.run(dpp, null_yield);
return 0;
}
};
}
RGWDataSyncStatusManager* get_manager() { return &sync; }
- int init() override {
+ int init(const DoutPrefixProvider *dpp) override {
return 0;
}
- int process() override {
+ int process(const DoutPrefixProvider *dpp) override {
while (!initialized) {
if (going_down()) {
return 0;
}
- int ret = sync.init();
+ int ret = sync.init(dpp);
if (ret >= 0) {
initialized = true;
break;
/* we'll be back! */
return 0;
}
- sync.run();
+ sync.run(dpp);
return 0;
}
};
trim_interval(interval, 0)
{}
- int init() override {
+ int init(const DoutPrefixProvider *dpp) override {
return http.start();
}
- int process() override {
+ int process(const DoutPrefixProvider *dpp) override {
list<RGWCoroutinesStack*> stacks;
auto meta = new RGWCoroutinesStack(store->ctx(), &crs);
meta->call(create_meta_log_trim_cr(this, static_cast<rgw::sal::RadosStore*>(store), &http,
if (store->svc()->zone->sync_module_exports_data()) {
auto data = new RGWCoroutinesStack(store->ctx(), &crs);
- data->call(create_data_log_trim_cr(static_cast<rgw::sal::RadosStore*>(store), &http,
+ data->call(create_data_log_trim_cr(dpp, static_cast<rgw::sal::RadosStore*>(store), &http,
cct->_conf->rgw_data_log_num_shards,
trim_interval));
stacks.push_back(data);
stacks.push_back(bucket);
}
- crs.run(stacks);
+ crs.run(dpp, stacks);
return 0;
}
return thread->second->get_manager();
}
-int RGWRados::get_required_alignment(const rgw_pool& pool, uint64_t *alignment)
+int RGWRados::get_required_alignment(const DoutPrefixProvider *dpp, const rgw_pool& pool, uint64_t *alignment)
{
IoCtx ioctx;
- int r = open_pool_ctx(pool, ioctx, false);
+ int r = open_pool_ctx(dpp, pool, ioctx, false);
if (r < 0) {
ldout(cct, 0) << "ERROR: open_pool_ctx() returned " << r << dendl;
return r;
int RGWRados::get_max_chunk_size(const rgw_pool& pool, uint64_t *max_chunk_size, const DoutPrefixProvider *dpp, uint64_t *palignment)
{
uint64_t alignment;
- int r = get_required_alignment(pool, &alignment);
+ int r = get_required_alignment(dpp, pool, &alignment);
if (r < 0) {
return r;
}
RGWIndexCompletionThread(RGWRados *_store)
: RGWRadosThread(_store, "index-complete"), store(_store) {}
- int process() override;
+ int process(const DoutPrefixProvider *dpp) override;
void add_completion(complete_op_data *completion) {
{
std::ostream& gen_prefix(std::ostream& out) const { return out << "rgw index completion thread: "; }
};
-int RGWIndexCompletionThread::process()
+int RGWIndexCompletionThread::process(const DoutPrefixProvider *dpp)
{
list<complete_op_data *> comps;
if (going_down()) {
continue;
}
- ldout(store->ctx(), 20) << __func__ << "(): handling completion for key=" << c->key << dendl;
+ ldpp_dout(this, 20) << __func__ << "(): handling completion for key=" << c->key << dendl;
RGWRados::BucketShard bs(store);
RGWBucketInfo bucket_info;
int r = bs.init(c->obj.bucket, c->obj, &bucket_info, this);
if (r < 0) {
- ldout(cct, 0) << "ERROR: " << __func__ << "(): failed to initialize BucketShard, obj=" << c->obj << " r=" << r << dendl;
+ ldpp_dout(this, 0) << "ERROR: " << __func__ << "(): failed to initialize BucketShard, obj=" << c->obj << " r=" << r << dendl;
/* not much to do */
continue;
}
cls_rgw_guard_bucket_resharding(o, -ERR_BUSY_RESHARDING);
cls_rgw_bucket_complete_op(o, c->op, c->tag, c->ver, c->key, c->dir_meta, &c->remove_objs,
c->log_op, c->bilog_op, &c->zones_trace);
- return bs->bucket_obj.operate(&o, null_yield);
+ return bs->bucket_obj.operate(this, &o, null_yield);
});
if (r < 0) {
ldpp_dout(this, 0) << "ERROR: " << __func__ << "(): bucket index completion failed, obj=" << c->obj << " r=" << r << dendl;
}
r = store->svc.datalog_rados->add_entry(this, bucket_info, bs.shard_id);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed writing data log" << dendl;
+ ldpp_dout(this, -1) << "ERROR: failed writing data log" << dendl;
}
}
complete_op_data **result);
bool handle_completion(completion_t cb, complete_op_data *arg);
- int start() {
+ int start(const DoutPrefixProvider *dpp) {
completion_thread = new RGWIndexCompletionThread(store);
- int ret = completion_thread->init();
+ int ret = completion_thread->init(dpp);
if (ret < 0) {
return ret;
}
*/
sync_module = svc.sync_modules->get_sync_module();
- ret = open_root_pool_ctx();
+ ret = open_root_pool_ctx(dpp);
if (ret < 0)
return ret;
- ret = open_gc_pool_ctx();
+ ret = open_gc_pool_ctx(dpp);
if (ret < 0)
return ret;
- ret = open_lc_pool_ctx();
+ ret = open_lc_pool_ctx(dpp);
if (ret < 0)
return ret;
- ret = open_objexp_pool_ctx();
+ ret = open_objexp_pool_ctx(dpp);
if (ret < 0)
return ret;
- ret = open_reshard_pool_ctx();
+ ret = open_reshard_pool_ctx(dpp);
if (ret < 0)
return ret;
- ret = open_notif_pool_ctx();
+ ret = open_notif_pool_ctx(dpp);
if (ret < 0)
return ret;
auto async_processor = svc.rados->get_async_processor();
std::lock_guard l{meta_sync_thread_lock};
meta_sync_processor_thread = new RGWMetaSyncProcessorThread(this->store, async_processor);
- ret = meta_sync_processor_thread->init();
+ ret = meta_sync_processor_thread->init(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize meta sync thread" << dendl;
return ret;
std::lock_guard dl{data_sync_thread_lock};
for (auto source_zone : svc.zone->get_data_sync_source_zones()) {
- ldout(cct, 5) << "starting data sync thread for zone " << source_zone->name << dendl;
+ ldpp_dout(dpp, 5) << "starting data sync thread for zone " << source_zone->name << dendl;
auto *thread = new RGWDataSyncProcessorThread(this->store, svc.rados->get_async_processor(), source_zone);
- ret = thread->init();
+ ret = thread->init(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize data sync thread" << dendl;
return ret;
auto interval = cct->_conf->rgw_sync_log_trim_interval;
if (interval > 0) {
sync_log_trimmer = new RGWSyncLogTrimThread(this->store, &*bucket_trim, interval);
- ret = sync_log_trimmer->init();
+ ret = sync_log_trimmer->init(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize sync log trim thread" << dendl;
return ret;
if (use_lc_thread)
lc->start_processor();
- quota_handler = RGWQuotaHandler::generate_handler(this->store, quota_threads);
+ quota_handler = RGWQuotaHandler::generate_handler(dpp, this->store, quota_threads);
bucket_index_max_shards = (cct->_conf->rgw_override_bucket_index_max_shards ? cct->_conf->rgw_override_bucket_index_max_shards :
zone.bucket_index_max_shards);
}
index_completion_manager = new RGWIndexCompletionManager(this);
- ret = index_completion_manager->start();
+ ret = index_completion_manager->start(dpp);
if (ret < 0) {
return ret;
}
* Open the pool used as root for this gateway
* Returns: 0 on success, -ERR# otherwise.
*/
-int RGWRados::open_root_pool_ctx()
+// Open the zone's domain_root pool into root_pool_ctx, creating it if absent
+// (both trailing bools true: create + mostly_omap hint). dpp threads the
+// caller's log prefix down to rgw_init_ioctx. Returns 0 or -ERR#.
+int RGWRados::open_root_pool_ctx(const DoutPrefixProvider *dpp)
{
- return rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().domain_root, root_pool_ctx, true, true);
+ return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().domain_root, root_pool_ctx, true, true);
}
-int RGWRados::open_gc_pool_ctx()
+// Open (create-if-missing) the garbage-collection pool into gc_pool_ctx.
+int RGWRados::open_gc_pool_ctx(const DoutPrefixProvider *dpp)
{
- return rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().gc_pool, gc_pool_ctx, true, true);
+ return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().gc_pool, gc_pool_ctx, true, true);
}
-int RGWRados::open_lc_pool_ctx()
+// Open (create-if-missing) the lifecycle pool into lc_pool_ctx.
+int RGWRados::open_lc_pool_ctx(const DoutPrefixProvider *dpp)
{
- return rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().lc_pool, lc_pool_ctx, true, true);
+ return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().lc_pool, lc_pool_ctx, true, true);
}
-int RGWRados::open_objexp_pool_ctx()
+// Open (create-if-missing) the log pool for object-expiration hints into
+// objexp_pool_ctx. Note it deliberately reuses log_pool, not a dedicated pool.
+int RGWRados::open_objexp_pool_ctx(const DoutPrefixProvider *dpp)
{
- return rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().log_pool, objexp_pool_ctx, true, true);
+ return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().log_pool, objexp_pool_ctx, true, true);
}
-int RGWRados::open_reshard_pool_ctx()
+// Open (create-if-missing) the bucket-reshard pool into reshard_pool_ctx.
+int RGWRados::open_reshard_pool_ctx(const DoutPrefixProvider *dpp)
{
- return rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().reshard_pool, reshard_pool_ctx, true, true);
+ return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().reshard_pool, reshard_pool_ctx, true, true);
}
-int RGWRados::open_notif_pool_ctx()
+// Open (create-if-missing) the notification pool into notif_pool_ctx.
+int RGWRados::open_notif_pool_ctx(const DoutPrefixProvider *dpp)
{
- return rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().notif_pool, notif_pool_ctx, true, true);
+ return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().notif_pool, notif_pool_ctx, true, true);
}
-int RGWRados::open_pool_ctx(const rgw_pool& pool, librados::IoCtx& io_ctx,
+// Generic pool opener: binds io_ctx to 'pool', creating the pool on first
+// use; mostly_omap passes a workload hint through to rgw_init_ioctx.
+int RGWRados::open_pool_ctx(const DoutPrefixProvider *dpp, const rgw_pool& pool, librados::IoCtx& io_ctx,
bool mostly_omap)
{
constexpr bool create = true; // create the pool if it doesn't exist
- return rgw_init_ioctx(get_rados_handle(), pool, io_ctx, create, mostly_omap);
+ return rgw_init_ioctx(dpp, get_rados_handle(), pool, io_ctx, create, mostly_omap);
}
/**** logs ****/
librados::NObjectIterator obit;
};
-int RGWRados::log_list_init(const string& prefix, RGWAccessHandle *handle)
+int RGWRados::log_list_init(const DoutPrefixProvider *dpp, const string& prefix, RGWAccessHandle *handle)
{
log_list_state *state = new log_list_state;
- int r = rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().log_pool, state->io_ctx);
+ int r = rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().log_pool, state->io_ctx);
if (r < 0) {
delete state;
return r;
return 0;
}
-int RGWRados::log_remove(const string& name)
+int RGWRados::log_remove(const DoutPrefixProvider *dpp, const string& name)
{
librados::IoCtx io_ctx;
- int r = rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().log_pool, io_ctx);
+ int r = rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().log_pool, io_ctx);
if (r < 0)
return r;
return io_ctx.remove(name);
log_show_state() : pos(0), eof(false) {}
};
-int RGWRados::log_show_init(const string& name, RGWAccessHandle *handle)
+int RGWRados::log_show_init(const DoutPrefixProvider *dpp, const string& name, RGWAccessHandle *handle)
{
log_show_state *state = new log_show_state;
- int r = rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().log_pool, state->io_ctx);
+ int r = rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().log_pool, state->io_ctx);
if (r < 0) {
delete state;
return r;
hash = buf;
}
-int RGWRados::log_usage(map<rgw_user_bucket, RGWUsageBatch>& usage_info)
+int RGWRados::log_usage(const DoutPrefixProvider *dpp, map<rgw_user_bucket, RGWUsageBatch>& usage_info)
{
uint32_t index = 0;
RGWUsageBatch& info = iter->second;
if (ub.user.empty()) {
- ldout(cct, 0) << "WARNING: RGWRados::log_usage(): user name empty (bucket=" << ub.bucket << "), skipping" << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: RGWRados::log_usage(): user name empty (bucket=" << ub.bucket << "), skipping" << dendl;
continue;
}
map<string, rgw_usage_log_info>::iterator liter;
for (liter = log_objs.begin(); liter != log_objs.end(); ++liter) {
- int r = cls_obj_usage_log_add(liter->first, liter->second);
+ int r = cls_obj_usage_log_add(dpp, liter->first, liter->second);
if (r < 0)
return r;
}
return 0;
}
-int RGWRados::read_usage(const rgw_user& user, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch,
+int RGWRados::read_usage(const DoutPrefixProvider *dpp, const rgw_user& user, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch,
uint32_t max_entries, bool *is_truncated, RGWUsageIter& usage_iter, map<rgw_user_bucket,
rgw_usage_log_entry>& usage)
{
map<rgw_user_bucket, rgw_usage_log_entry> ret_usage;
map<rgw_user_bucket, rgw_usage_log_entry>::iterator iter;
- int ret = cls_obj_usage_log_read(hash, user_str, bucket_name, start_epoch, end_epoch, num,
+ int ret = cls_obj_usage_log_read(dpp, hash, user_str, bucket_name, start_epoch, end_epoch, num,
usage_iter.read_iter, ret_usage, is_truncated);
if (ret == -ENOENT)
goto next;
return 0;
}
-int RGWRados::trim_usage(const rgw_user& user, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch)
+int RGWRados::trim_usage(const DoutPrefixProvider *dpp, const rgw_user& user, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch)
{
uint32_t index = 0;
string hash, first_hash;
hash = first_hash;
do {
- int ret = cls_obj_usage_log_trim(hash, user_str, bucket_name, start_epoch, end_epoch);
+ int ret = cls_obj_usage_log_trim(dpp, hash, user_str, bucket_name, start_epoch, end_epoch);
if (ret < 0 && ret != -ENOENT)
return ret;
}
-int RGWRados::clear_usage()
+// Wipe every usage-log shard object (RGW_USAGE_OBJ_PREFIX + shard index).
+// Returns the first hard error encountered; dpp threads the caller's log
+// prefix into cls_obj_usage_log_clear and the failure message.
+int RGWRados::clear_usage(const DoutPrefixProvider *dpp)
{
auto max_shards = cct->_conf->rgw_usage_max_shards;
int ret=0;
for (unsigned i=0; i < max_shards; i++){
string oid = RGW_USAGE_OBJ_PREFIX + to_string(i);
- ret = cls_obj_usage_log_clear(oid);
+ ret = cls_obj_usage_log_clear(dpp, oid);
if (ret < 0){
- ldout(cct,0) << "usage clear on oid="<< oid << "failed with ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "usage clear on oid=" << oid << " failed with ret=" << ret << dendl;
return ret;
}
}
return 0;
}
-int rgw_policy_from_attrset(CephContext *cct, map<string, bufferlist>& attrset, RGWAccessControlPolicy *policy)
+int rgw_policy_from_attrset(const DoutPrefixProvider *dpp, CephContext *cct, map<string, bufferlist>& attrset, RGWAccessControlPolicy *policy)
{
map<string, bufferlist>::iterator aiter = attrset.find(RGW_ATTR_ACL);
if (aiter == attrset.end())
try {
policy->decode(iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
return -EIO;
}
if (cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
RGWAccessControlPolicy_S3 *s3policy = static_cast<RGWAccessControlPolicy_S3 *>(policy);
- ldout(cct, 15) << __func__ << " Read AccessControlPolicy";
+ ldpp_dout(dpp, 15) << __func__ << " Read AccessControlPolicy";
s3policy->to_xml(*_dout);
*_dout << dendl;
}
rgw_obj_index_key prev_marker;
for (uint16_t attempt = 1; /* empty */; ++attempt) {
- ldout(cct, 20) << "RGWRados::Bucket::List::" << __func__ <<
+ ldpp_dout(dpp, 20) << "RGWRados::Bucket::List::" << __func__ <<
" starting attempt " << attempt << dendl;
if (attempt > 1 && !(prev_marker < cur_marker)) {
// we've failed to make forward progress
- ldout(cct, 0) << "RGWRados::Bucket::List::" << __func__ <<
+ ldpp_dout(dpp, 0) << "RGWRados::Bucket::List::" << __func__ <<
": ERROR marker failed to make forward progress; attempt=" << attempt <<
", prev_marker=" << prev_marker <<
", cur_marker=" << cur_marker << dendl;
*/
bool valid = rgw_obj_key::parse_raw_oid(index_key.name, &obj);
if (!valid) {
- ldout(cct, 0) << "ERROR: could not parse object name: " <<
+ ldpp_dout(dpp, 0) << "ERROR: could not parse object name: " <<
obj.name << dendl;
continue;
}
// after the prefix
if (delim_pos !=
int(obj.name.length() - params.delim.length())) {
- ldout(cct, 0) <<
+ ldpp_dout(dpp, 0) <<
"WARNING: found delimiter in place other than the end of "
"the prefix; obj.name=" << obj.name <<
", prefix=" << params.prefix << dendl;
goto done;
}
- ldout(cct, 20) << "RGWRados::Bucket::List::" << __func__ <<
+ ldpp_dout(dpp, 20) << "RGWRados::Bucket::List::" << __func__ <<
" adding entry " << entry.key << " to result" << dendl;
result->emplace_back(std::move(entry));
cur_marker.name.substr(0, marker_delim_pos);
skip_after_delim.append(after_delim_s);
- ldout(cct, 20) << "skip_after_delim=" << skip_after_delim << dendl;
+ ldpp_dout(dpp, 20) << "skip_after_delim=" << skip_after_delim << dendl;
if (skip_after_delim > cur_marker.name) {
cur_marker = skip_after_delim;
- ldout(cct, 20) << "setting cur_marker="
+ ldpp_dout(dpp, 20) << "setting cur_marker="
<< cur_marker.name
<< "[" << cur_marker.instance << "]"
<< dendl;
}
} // if older osd didn't do delimiter filtering
- ldout(cct, 20) << "RGWRados::Bucket::List::" << __func__ <<
+ ldpp_dout(dpp, 20) << "RGWRados::Bucket::List::" << __func__ <<
" INFO end of outer loop, truncated=" << truncated <<
", count=" << count << ", attempt=" << attempt << dendl;
optional_yield y)
{
RGWRados *store = target->get_store();
- CephContext *cct = store->ctx();
int shard_id = target->get_shard_id();
int count = 0;
*/
bool valid = rgw_obj_key::parse_raw_oid(index_key.name, &obj);
if (!valid) {
- ldout(cct, 0) << "ERROR: could not parse object name: " <<
+ ldpp_dout(dpp, 0) << "ERROR: could not parse object name: " <<
obj.name << dendl;
continue;
}
* create a rados pool, associated meta info
* returns 0 on success, -ERR# otherwise.
*/
-int RGWRados::create_pool(const rgw_pool& pool)
+// Create the given rados pool (or succeed if it already exists). The local
+// IoCtx is discarded — this call is used purely for its create side effect.
+int RGWRados::create_pool(const DoutPrefixProvider *dpp, const rgw_pool& pool)
{
librados::IoCtx io_ctx;
constexpr bool create = true;
- return rgw_init_ioctx(get_rados_handle(), pool, io_ctx, create);
+ return rgw_init_ioctx(dpp, get_rados_handle(), pool, io_ctx, create);
}
void RGWRados::create_bucket_id(string *bucket_id)
for (int i = 0; i < MAX_CREATE_RETRIES; i++) {
int ret = 0;
- ret = svc.zone->select_bucket_placement(owner, zonegroup_id, placement_rule,
+ ret = svc.zone->select_bucket_placement(dpp, owner, zonegroup_id, placement_rule,
&selected_placement_rule, &rule_info, y);
if (ret < 0)
return ret;
info.quota = *pquota_info;
}
- int r = svc.bi->init_index(info);
+ int r = svc.bi->init_index(dpp, info);
if (r < 0) {
return r;
}
if (r == -ENOENT) {
continue;
}
- ldout(cct, 0) << "get_bucket_info returned " << r << dendl;
+ ldpp_dout(dpp, 0) << "get_bucket_info returned " << r << dendl;
return r;
}
/* only remove it if it's a different bucket instance */
if (orig_info.bucket.bucket_id != bucket.bucket_id) {
- int r = svc.bi->clean_index(info);
+ int r = svc.bi->clean_index(dpp, info);
if (r < 0) {
- ldout(cct, 0) << "WARNING: could not remove bucket index (r=" << r << ")" << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: could not remove bucket index (r=" << r << ")" << dendl;
}
r = ctl.bucket->remove_bucket_instance_info(info.bucket, info, null_yield, dpp);
if (r < 0) {
- ldout(cct, 0) << "WARNING: " << __func__ << "(): failed to remove bucket instance info: bucket instance=" << info.bucket.get_key() << ": r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: " << __func__ << "(): failed to remove bucket instance info: bucket instance=" << info.bucket.get_key() << ": r=" << r << dendl;
/* continue anyway */
}
}
}
/* this is highly unlikely */
- ldout(cct, 0) << "ERROR: could not create bucket, continuously raced with bucket creation and removal" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not create bucket, continuously raced with bucket creation and removal" << dendl;
return -ENOENT;
}
return get_obj_data_pool(placement_rule, obj, &raw_obj->pool);
}
-int RGWRados::get_obj_head_ioctx(const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::IoCtx *ioctx)
+// Resolve the data pool for obj's head (via the bucket's placement rule) and
+// open it into *ioctx. -EIO if no data pool maps to the placement rule
+// (misconfiguration); otherwise whatever open_pool_ctx returns.
+int RGWRados::get_obj_head_ioctx(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::IoCtx *ioctx)
{
string oid, key;
get_obj_bucket_and_oid_loc(obj, oid, key);
rgw_pool pool;
if (!get_obj_data_pool(bucket_info.placement_rule, obj, &pool)) {
- ldout(cct, 0) << "ERROR: cannot get data pool for obj=" << obj << ", probably misconfiguration" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: cannot get data pool for obj=" << obj << ", probably misconfiguration" << dendl;
return -EIO;
}
- int r = open_pool_ctx(pool, *ioctx, false);
+ int r = open_pool_ctx(dpp, pool, *ioctx, false);
if (r < 0) {
return r;
}
return 0;
}
-int RGWRados::get_obj_head_ref(const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_rados_ref *ref)
+// Build a rgw_rados_ref (oid/locator + opened pool handle) for obj's head
+// object. -EIO when the placement rule maps to no data pool; otherwise the
+// pool-open result.
+int RGWRados::get_obj_head_ref(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_rados_ref *ref)
{
get_obj_bucket_and_oid_loc(obj, ref->obj.oid, ref->obj.loc);
rgw_pool pool;
if (!get_obj_data_pool(bucket_info.placement_rule, obj, &pool)) {
- ldout(cct, 0) << "ERROR: cannot get data pool for obj=" << obj << ", probably misconfiguration" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: cannot get data pool for obj=" << obj << ", probably misconfiguration" << dendl;
return -EIO;
}
ref->pool = svc.rados->pool(pool);
- int r = ref->pool.open(RGWSI_RADOS::OpenParams()
+ int r = ref->pool.open(dpp, RGWSI_RADOS::OpenParams()
.set_mostly_omap(false));
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed opening data pool (pool=" << pool << "); r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed opening data pool (pool=" << pool << "); r=" << r << dendl;
return r;
}
return 0;
}
-int RGWRados::get_raw_obj_ref(const rgw_raw_obj& obj, rgw_rados_ref *ref)
+int RGWRados::get_raw_obj_ref(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, rgw_rados_ref *ref)
{
ref->obj = obj;
ref->obj.pool = svc.zone->get_zone_params().domain_root;
}
ref->pool = svc.rados->pool(obj.pool);
- int r = ref->pool.open(RGWSI_RADOS::OpenParams()
+ int r = ref->pool.open(dpp, RGWSI_RADOS::OpenParams()
.set_mostly_omap(false));
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed opening pool (pool=" << obj.pool << "); r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed opening pool (pool=" << obj.pool << "); r=" << r << dendl;
return r;
}
return 0;
}
-int RGWRados::get_system_obj_ref(const rgw_raw_obj& obj, rgw_rados_ref *ref)
+// System objects resolve identically to raw objects; kept as a separate
+// entry point for call-site clarity.
+int RGWRados::get_system_obj_ref(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, rgw_rados_ref *ref)
{
- return get_raw_obj_ref(obj, ref);
+ return get_raw_obj_ref(dpp, obj, ref);
}
/*
* fixes an issue where head objects were supposed to have a locator created, but ended
* up without one
*/
-int RGWRados::fix_head_obj_locator(const RGWBucketInfo& bucket_info, bool copy_obj, bool remove_bad, rgw_obj_key& key)
+int RGWRados::fix_head_obj_locator(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, bool copy_obj, bool remove_bad, rgw_obj_key& key)
{
const rgw_bucket& bucket = bucket_info.bucket;
string oid;
get_obj_bucket_and_oid_loc(obj, oid, locator);
if (locator.empty()) {
- ldout(cct, 20) << "object does not have a locator, nothing to fix" << dendl;
+ ldpp_dout(dpp, 20) << "object does not have a locator, nothing to fix" << dendl;
return 0;
}
librados::IoCtx ioctx;
- int ret = get_obj_head_ioctx(bucket_info, obj, &ioctx);
+ int ret = get_obj_head_ioctx(dpp, bucket_info, obj, &ioctx);
if (ret < 0) {
cerr << "ERROR: get_obj_head_ioctx() returned ret=" << ret << std::endl;
return ret;
#define HEAD_SIZE 512 * 1024
op.read(0, HEAD_SIZE, &data, NULL);
- ret = rgw_rados_operate(ioctx, oid, &op, &data, null_yield);
+ ret = rgw_rados_operate(dpp, ioctx, oid, &op, &data, null_yield);
if (ret < 0) {
- lderr(cct) << "ERROR: rgw_rados_operate(oid=" << oid << ") returned ret=" << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: rgw_rados_operate(oid=" << oid << ") returned ret=" << ret << dendl;
return ret;
}
if (size > HEAD_SIZE) {
- lderr(cct) << "ERROR: returned object size (" << size << ") > HEAD_SIZE (" << HEAD_SIZE << ")" << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: returned object size (" << size << ") > HEAD_SIZE (" << HEAD_SIZE << ")" << dendl;
return -EIO;
}
if (size != data.length()) {
- lderr(cct) << "ERROR: returned object size (" << size << ") != data.length() (" << data.length() << ")" << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: returned object size (" << size << ") != data.length() (" << data.length() << ")" << dendl;
return -EIO;
}
wop.write(0, data);
ioctx.locator_set_key(locator);
- rgw_rados_operate(ioctx, oid, &wop, null_yield);
+ rgw_rados_operate(dpp, ioctx, oid, &wop, null_yield);
}
if (remove_bad) {
ret = ioctx.remove(oid);
if (ret < 0) {
- lderr(cct) << "ERROR: failed to remove original bad object" << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to remove original bad object" << dendl;
return ret;
}
}
return 0;
}
-int RGWRados::move_rados_obj(librados::IoCtx& src_ioctx,
+int RGWRados::move_rados_obj(const DoutPrefixProvider *dpp,
+ librados::IoCtx& src_ioctx,
const string& src_oid, const string& src_locator,
librados::IoCtx& dst_ioctx,
const string& dst_oid, const string& dst_locator)
mtime = real_clock::from_timespec(mtime_ts);
}
rop.read(ofs, chunk_size, &data, NULL);
- ret = rgw_rados_operate(src_ioctx, src_oid, &rop, &data, null_yield);
+ ret = rgw_rados_operate(dpp, src_ioctx, src_oid, &rop, &data, null_yield);
if (ret < 0) {
goto done_err;
}
mtime = real_clock::from_timespec(mtime_ts);
}
wop.write(ofs, data);
- ret = rgw_rados_operate(dst_ioctx, dst_oid, &wop, null_yield);
+ ret = rgw_rados_operate(dpp, dst_ioctx, dst_oid, &wop, null_yield);
if (ret < 0) {
goto done_err;
}
} while (!done);
if (ofs != size) {
- lderr(cct) << "ERROR: " << __func__ << ": copying " << src_oid << " -> " << dst_oid
+ ldpp_dout(dpp, -1) << "ERROR: " << __func__ << ": copying " << src_oid << " -> " << dst_oid
<< ": expected " << size << " bytes to copy, ended up with " << ofs << dendl;
ret = -EIO;
goto done_err;
done_err:
// TODO: clean up dst_oid if we created it
- lderr(cct) << "ERROR: failed to copy " << src_oid << " -> " << dst_oid << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to copy " << src_oid << " -> " << dst_oid << dendl;
return ret;
}
}
rgw_rados_ref ref;
- int r = get_obj_head_ref(bucket_info, obj, &ref);
+ int r = get_obj_head_ref(dpp, bucket_info, obj, &ref);
if (r < 0) {
return r;
}
if (astate->manifest) {
RGWObjManifest::obj_iterator miter;
RGWObjManifest& manifest = *astate->manifest;
- for (miter = manifest.obj_begin(); miter != manifest.obj_end(); ++miter) {
+ for (miter = manifest.obj_begin(dpp); miter != manifest.obj_end(dpp); ++miter) {
rgw_raw_obj raw_loc = miter.get_location().get_raw_obj(store);
rgw_obj loc;
string oid;
*need_fix = true;
}
if (fix) {
- r = move_rados_obj(src_ioctx, oid, bad_loc, ioctx, oid, locator);
+ r = move_rados_obj(dpp, src_ioctx, oid, bad_loc, ioctx, oid, locator);
if (r < 0) {
- lderr(cct) << "ERROR: copy_rados_obj() on oid=" << oid << " returned r=" << r << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: copy_rados_obj() on oid=" << oid << " returned r=" << r << dendl;
}
}
}
string oid;
- ret = store->svc.bi_rados->open_bucket_index_shard(*bucket_info_p, obj.get_hash_object(), &bucket_obj, &shard_id);
+ ret = store->svc.bi_rados->open_bucket_index_shard(dpp, *bucket_info_p, obj.get_hash_object(), &bucket_obj, &shard_id);
if (ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl;
return ret;
}
ldpp_dout(dpp, 20) << " bucket index object: " << bucket_obj.get_raw_obj() << dendl;
string oid;
- ret = store->svc.bi_rados->open_bucket_index_shard(*bucket_info_p, shard_id, idx_layout, &bucket_obj);
+ ret = store->svc.bi_rados->open_bucket_index_shard(dpp, *bucket_info_p, shard_id, idx_layout, &bucket_obj);
if (ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl;
return ret;
}
ldpp_dout(dpp, 20) << " bucket index oid: " << bucket_obj.get_raw_obj() << dendl;
return 0;
}
-int RGWRados::BucketShard::init(const RGWBucketInfo& bucket_info,
+// Bind this BucketShard to the index shard that owns 'obj': the shard is
+// selected by the object's hash and open_bucket_index_shard() fills in both
+// the shard object handle and the computed shard_id.
+int RGWRados::BucketShard::init(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info,
const rgw_obj& obj)
{
bucket = bucket_info.bucket;
- int ret = store->svc.bi_rados->open_bucket_index_shard(bucket_info,
+ int ret = store->svc.bi_rados->open_bucket_index_shard(dpp, bucket_info,
obj.get_hash_object(),
&bucket_obj,
&shard_id);
if (ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl;
return ret;
}
- ldout(store->ctx(), 20) << " bucket index object: " << bucket_obj << dendl;
+ ldpp_dout(dpp, 20) << " bucket index object: " << bucket_obj << dendl;
return 0;
}
-int RGWRados::BucketShard::init(const RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout, int sid)
+// Overload for an explicitly chosen shard: bind this BucketShard to shard
+// 'sid' of the given index layout generation (no hashing involved).
+int RGWRados::BucketShard::init(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout, int sid)
{
bucket = bucket_info.bucket;
shard_id = sid;
- int ret = store->svc.bi_rados->open_bucket_index_shard(bucket_info, shard_id, idx_layout, &bucket_obj);
+ int ret = store->svc.bi_rados->open_bucket_index_shard(dpp, bucket_info, shard_id, idx_layout, &bucket_obj);
if (ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl;
return ret;
}
- ldout(store->ctx(), 20) << " bucket index object: " << bucket_obj << dendl;
+ ldpp_dout(dpp, 20) << " bucket index object: " << bucket_obj << dendl;
return 0;
}
r = get_bucket_info(&svc, bucket->get_tenant(), bucket->get_info().swift_ver_location, dest_bucket_info, NULL, null_yield, NULL);
if (r < 0) {
- ldout(cct, 10) << "failed to read dest bucket info: r=" << r << dendl;
+ ldpp_dout(dpp, 10) << "failed to read dest bucket info: r=" << r << dendl;
if (r == -ENOENT) {
return -ERR_PRECONDITION_FAILED;
}
}
rgw_rados_ref ref;
- r = store->get_obj_head_ref(target->get_bucket_info(), obj, &ref);
+ r = store->get_obj_head_ref(dpp, target->get_bucket_info(), obj, &ref);
if (r < 0)
return r;
auto& ioctx = ref.pool.ioctx();
tracepoint(rgw_rados, operate_enter, req_id.c_str());
- r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
+ r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
tracepoint(rgw_rados, operate_exit, req_id.c_str());
if (r < 0) { /* we can expect to get -ECANCELED if object was replaced under,
or -ENOENT if was removed, or -EEXIST if it did not exist
epoch = ioctx.get_last_version();
poolid = ioctx.get_id();
- r = target->complete_atomic_modification();
+ r = target->complete_atomic_modification(dpp);
if (r < 0) {
- ldout(store->ctx(), 0) << "ERROR: complete_atomic_modification returned r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: complete_atomic_modification returned r=" << r << dendl;
}
tracepoint(rgw_rados, complete_enter, req_id.c_str());
rgw_obj_index_key obj_key;
obj.key.get_index_key(&obj_key);
- r = store->obj_expirer->hint_add(meta.delete_at, obj.bucket.tenant, obj.bucket.name,
+ r = store->obj_expirer->hint_add(dpp, meta.delete_at, obj.bucket.tenant, obj.bucket.name,
obj.bucket.bucket_id, obj_key);
if (r < 0) {
- ldout(store->ctx(), 0) << "ERROR: objexp_hint_add() returned r=" << r << ", object will not get removed" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: objexp_hint_add() returned r=" << r << ", object will not get removed" << dendl;
/* ignoring error, nothing we can do at this point */
}
}
done_cancel:
int ret = index_op->cancel(dpp);
if (ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: index_op.cancel()() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: index_op.cancel()() returned ret=" << ret << dendl;
}
meta.canceled = true;
class RGWRadosPutObj : public RGWHTTPStreamRWRequest::ReceiveCB
{
+ const DoutPrefixProvider *dpp;
CephContext* cct;
rgw_obj obj;
rgw::putobj::DataProcessor *filter;
uint64_t lofs{0}; /* logical ofs */
std::function<int(map<string, bufferlist>&)> attrs_handler;
public:
- RGWRadosPutObj(CephContext* cct,
+ RGWRadosPutObj(const DoutPrefixProvider *dpp,
+ CephContext* cct,
CompressorRef& plugin,
boost::optional<RGWPutObj_Compress>& compressor,
rgw::putobj::ObjectProcessor *p,
void (*_progress_cb)(off_t, void *),
void *_progress_data,
std::function<int(map<string, bufferlist>&)> _attrs_handler) :
+ dpp(dpp),
cct(cct),
filter(p),
compressor(compressor),
if (extra_data_bl.length()) {
JSONParser jp;
if (!jp.parse(extra_data_bl.c_str(), extra_data_bl.length())) {
- ldout(cct, 0) << "failed to parse response extra data. len=" << extra_data_bl.length() << " data=" << extra_data_bl.c_str() << dendl;
+ ldpp_dout(dpp, 0) << "failed to parse response extra data. len=" << extra_data_bl.length() << " data=" << extra_data_bl.c_str() << dendl;
return -EIO;
}
bool compressed = false;
int r = rgw_compression_info_from_attr(bl, compressed, info);
if (r < 0) {
- ldout(cct, 4) << "failed to decode compression info, "
+ ldpp_dout(dpp, 4) << "failed to decode compression info, "
"disabling etag verification" << dendl;
try_etag_verify = false;
} else if (compressed) {
* to know the sequence in which the filters must be applied.
*/
if (try_etag_verify && src_attrs.find(RGW_ATTR_CRYPT_MODE) == src_attrs.end()) {
- ret = rgw::putobj::create_etag_verifier(cct, filter, manifest_bl,
+ ret = rgw::putobj::create_etag_verifier(dpp, cct, filter, manifest_bl,
compression_info,
etag_verifier);
if (ret < 0) {
- ldout(cct, 4) << "failed to initial etag verifier, "
+ ldpp_dout(dpp, 4) << "failed to initial etag verifier, "
"disabling etag verification" << dendl;
} else {
filter = etag_verifier.get();
}
};
-int RGWRados::stat_remote_obj(RGWObjectCtx& obj_ctx,
+int RGWRados::stat_remote_obj(const DoutPrefixProvider *dpp,
+ RGWObjectCtx& obj_ctx,
const rgw_user& user_id,
req_info *info,
const rgw_zone_id& source_zone,
constexpr bool rgwx_stat = true;
constexpr bool sync_manifest = true;
constexpr bool skip_decrypt = true;
- int ret = conn->get_obj(user_id, info, src_obj, pmod, unmod_ptr,
+ int ret = conn->get_obj(dpp, user_id, info, src_obj, pmod, unmod_ptr,
dest_mtime_weight.zone_short_id, dest_mtime_weight.pg_ver,
prepend_meta, get_op, rgwx_stat,
sync_manifest, skip_decrypt,
} else {
map<string, RGWRESTConn *>::iterator iter = zonegroup_conn_map.find(src_bucket->get_info().zonegroup);
if (iter == zonegroup_conn_map.end()) {
- ldout(cct, 0) << "could not find zonegroup connection to zonegroup: " << source_zone << dendl;
+ ldpp_dout(dpp, 0) << "could not find zonegroup connection to zonegroup: " << source_zone << dendl;
return -ENOENT;
}
conn = iter->second;
} else {
auto iter = zone_conn_map.find(source_zone);
if (iter == zone_conn_map.end()) {
- ldout(cct, 0) << "could not find zone connection to zone: " << source_zone << dendl;
+ ldpp_dout(dpp, 0) << "could not find zone connection to zone: " << source_zone << dendl;
return -ENOENT;
}
conn = iter->second;
std::optional<rgw_user> override_owner;
- RGWRadosPutObj cb(cct, plugin, compressor, &processor, progress_cb, progress_data,
+ RGWRadosPutObj cb(dpp, cct, plugin, compressor, &processor, progress_cb, progress_data,
[&](map<string, bufferlist>& obj_attrs) {
const rgw_placement_rule *ptail_rule;
&override_owner,
&ptail_rule);
if (ret < 0) {
- ldout(cct, 5) << "Aborting fetch: source object filter returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << "Aborting fetch: source object filter returned ret=" << ret << dendl;
return ret;
}
if (compression_type != "none") {
plugin = Compressor::create(cct, compression_type);
if (!plugin) {
- ldout(cct, 1) << "Cannot load plugin for compression type "
+ ldpp_dout(dpp, 1) << "Cannot load plugin for compression type "
<< compression_type << dendl;
}
}
static constexpr bool rgwx_stat = false;
static constexpr bool sync_manifest = true;
static constexpr bool skip_decrypt = true;
- ret = conn->get_obj(user_id, info, src_obj, pmod, unmod_ptr,
+ ret = conn->get_obj(dpp, user_id, info, src_obj, pmod, unmod_ptr,
dest_mtime_weight.zone_short_id, dest_mtime_weight.pg_ver,
prepend_meta, get_op, rgwx_stat,
sync_manifest, skip_decrypt,
}
if (cb.get_data_len() != expected_size) {
ret = -EIO;
- ldout(cct, 0) << "ERROR: object truncated during fetching, expected "
+ ldpp_dout(dpp, 0) << "ERROR: object truncated during fetching, expected "
<< expected_size << " bytes but received " << cb.get_data_len() << dendl;
goto set_err_state;
}
RGWUserInfo owner_info;
if (ctl.user->get_info_by_uid(dpp, *override_owner, &owner_info, null_yield) < 0) {
- ldout(cct, 10) << "owner info does not exist" << dendl;
+ ldpp_dout(dpp, 10) << "owner info does not exist" << dendl;
return -EINVAL;
}
auto aiter = obj_attrs.find(RGW_ATTR_ACL);
if (aiter == obj_attrs.end()) {
- ldout(cct, 0) << "WARNING: " << __func__ << "(): object doesn't have ACL attribute, setting default ACLs" << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: " << __func__ << "(): object doesn't have ACL attribute, setting default ACLs" << dendl;
acl.create_default(owner_info.user_id, owner_info.display_name);
} else {
auto iter = aiter->second.cbegin();
try {
acl.decode(iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: " << __func__ << "(): could not decode policy, caught buffer::error" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): could not decode policy, caught buffer::error" << dendl;
return -EIO;
}
}
try {
decode(delete_at, iter->second);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: failed to decode delete_at field in intra zone copy" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode delete_at field in intra zone copy" << dendl;
}
}
}
try {
decode(pg_ver, iter);
} catch (buffer::error& err) {
- ldout(ctx(), 0) << "ERROR: failed to decode pg ver attribute, ignoring" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode pg ver attribute, ignoring" << dendl;
/* non critical error */
}
}
if (verifier_etag != trimmed_etag) {
ret = -EIO;
- ldout(cct, 0) << "ERROR: source and destination objects don't match. Expected etag:"
+ ldpp_dout(dpp, 0) << "ERROR: source and destination objects don't match. Expected etag:"
<< trimmed_etag << " Computed etag:" << verifier_etag << dendl;
goto set_err_state;
}
}
if (copy_if_newer && canceled) {
- ldout(cct, 20) << "raced with another write of obj: " << dest_obj << dendl;
+ ldpp_dout(dpp, 20) << "raced with another write of obj: " << dest_obj << dendl;
obj_ctx.invalidate(dest_obj->get_obj()); /* object was overwritten */
ret = get_obj_state(dpp, &obj_ctx, dest_bucket->get_info(), dest_obj->get_obj(), &dest_state, false, null_yield);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: " << __func__ << ": get_err_state() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: " << __func__ << ": get_err_state() returned ret=" << ret << dendl;
goto set_err_state;
}
dest_mtime_weight.init(dest_state);
dest_mtime_weight.high_precision = high_precision_time;
if (!dest_state->exists ||
dest_mtime_weight < set_mtime_weight) {
- ldout(cct, 20) << "retrying writing object mtime=" << set_mtime << " dest_state->mtime=" << dest_state->mtime << " dest_state->exists=" << dest_state->exists << dendl;
+ ldpp_dout(dpp, 20) << "retrying writing object mtime=" << set_mtime << " dest_state->mtime=" << dest_state->mtime << " dest_state->exists=" << dest_state->exists << dendl;
continue;
} else {
- ldout(cct, 20) << "not retrying writing object mtime=" << set_mtime << " dest_state->mtime=" << dest_state->mtime << " dest_state->exists=" << dest_state->exists << dendl;
+ ldpp_dout(dpp, 20) << "not retrying writing object mtime=" << set_mtime << " dest_state->mtime=" << dest_state->mtime << " dest_state->exists=" << dest_state->exists << dendl;
}
}
break;
}
if (i == MAX_COMPLETE_RETRY) {
- ldout(cct, 0) << "ERROR: retried object completion too many times, something is wrong!" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: retried object completion too many times, something is wrong!" << dendl;
ret = -EIO;
goto set_err_state;
}
auto rest_master_conn = svc.zone->get_master_conn();
- int ret = rest_master_conn->put_obj_async_init(user_id, dest_obj, src_attrs, &out_stream_req);
+ int ret = rest_master_conn->put_obj_async_init(dpp, user_id, dest_obj, src_attrs, &out_stream_req);
if (ret < 0) {
return ret;
}
mtime, real_time(), attrs, olh_epoch, delete_at, petag, dpp, y);
}
- RGWObjManifest::obj_iterator miter = astate->manifest->obj_begin();
+ RGWObjManifest::obj_iterator miter = astate->manifest->obj_begin(dpp);
if (copy_first) { // we need to copy first chunk, not increase refcount
++miter;
}
rgw_rados_ref ref;
- ret = get_raw_obj_ref(miter.get_location().get_raw_obj(store), &ref);
+ ret = get_raw_obj_ref(dpp, miter.get_location().get_raw_obj(store), &ref);
if (ret < 0) {
return ret;
}
manifest.set_tail_placement(tail_placement.placement_rule, src_obj->get_bucket()->get_key());
}
string ref_tag;
- for (; miter != astate->manifest->obj_end(); ++miter) {
+ for (; miter != astate->manifest->obj_end(dpp); ++miter) {
ObjectWriteOperation op;
ref_tag = tag + '\0';
cls_refcount_get(op, ref_tag, true);
auto& ioctx = ref.pool.ioctx();
ioctx.locator_set_key(loc.loc);
- ret = rgw_rados_operate(ioctx, loc.oid, &op, null_yield);
+ ret = rgw_rados_operate(dpp, ioctx, loc.oid, &op, null_yield);
if (ret < 0) {
goto done_ret;
}
ref.pool.ioctx().locator_set_key(riter->loc);
- int r = rgw_rados_operate(ref.pool.ioctx(), riter->oid, &op, null_yield);
+ int r = rgw_rados_operate(dpp, ref.pool.ioctx(), riter->oid, &op, null_yield);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: cleanup after error failed to drop reference on obj=" << *riter << dendl;
}
const rgw_bucket& bucket = bucket_info.bucket;
RGWSI_RADOS::Pool index_pool;
map<int, string> bucket_objs;
- int r = svc.bi_rados->open_bucket_index(bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr);
+ int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr);
if (r < 0)
return r;
r = get_bucket_instance_info(obj_ctx, bucket, info, nullptr, &attrs, null_yield, dpp);
}
if (r < 0) {
- ldout(cct, 0) << "NOTICE: get_bucket_info on bucket=" << bucket.name << " returned err=" << r << dendl;
+ ldpp_dout(dpp, 0) << "NOTICE: get_bucket_info on bucket=" << bucket.name << " returned err=" << r << dendl;
return r;
}
r = put_bucket_instance_info(info, false, real_time(), &attrs, dpp);
if (r < 0) {
- ldout(cct, 0) << "NOTICE: put_bucket_info on bucket=" << bucket.name << " returned err=" << r << dendl;
+ ldpp_dout(dpp, 0) << "NOTICE: put_bucket_info on bucket=" << bucket.name << " returned err=" << r << dendl;
return r;
}
map<string, bufferlist> attrs;
int r = get_bucket_info(&svc, bucket.tenant, bucket.name, info, NULL, null_yield, dpp, &attrs);
if (r < 0) {
- ldout(cct, 0) << "NOTICE: get_bucket_info on bucket=" << bucket.name << " returned err=" << r << ", skipping bucket" << dendl;
+ ldpp_dout(dpp, 0) << "NOTICE: get_bucket_info on bucket=" << bucket.name << " returned err=" << r << ", skipping bucket" << dendl;
ret = r;
continue;
}
r = put_bucket_instance_info(info, false, real_time(), &attrs, dpp);
if (r < 0) {
- ldout(cct, 0) << "NOTICE: put_bucket_info on bucket=" << bucket.name << " returned err=" << r << ", skipping bucket" << dendl;
+ ldpp_dout(dpp, 0) << "NOTICE: put_bucket_info on bucket=" << bucket.name << " returned err=" << r << ", skipping bucket" << dendl;
ret = r;
continue;
}
return 0;
}
-int RGWRados::Object::complete_atomic_modification()
+int RGWRados::Object::complete_atomic_modification(const DoutPrefixProvider *dpp)
{
if ((!state->manifest)|| state->keep_tail)
return 0;
cls_rgw_obj_chain chain;
- store->update_gc_chain(obj, *state->manifest, &chain);
+ store->update_gc_chain(dpp, obj, *state->manifest, &chain);
if (chain.empty()) {
return 0;
auto ret = store->gc->send_chain(chain, tag); // do it synchronously
if (ret < 0) {
//Delete objects inline if send chain to gc fails
- store->delete_objs_inline(chain, tag);
+ store->delete_objs_inline(dpp, chain, tag);
}
return 0;
}
-void RGWRados::update_gc_chain(rgw_obj& head_obj, RGWObjManifest& manifest, cls_rgw_obj_chain *chain)
+void RGWRados::update_gc_chain(const DoutPrefixProvider *dpp, rgw_obj& head_obj, RGWObjManifest& manifest, cls_rgw_obj_chain *chain)
{
RGWObjManifest::obj_iterator iter;
rgw_raw_obj raw_head;
obj_to_raw(manifest.get_head_placement_rule(), head_obj, &raw_head);
- for (iter = manifest.obj_begin(); iter != manifest.obj_end(); ++iter) {
+ for (iter = manifest.obj_begin(dpp); iter != manifest.obj_end(dpp); ++iter) {
const rgw_raw_obj& mobj = iter.get_location().get_raw_obj(store);
if (mobj == raw_head)
continue;
return gc->send_chain(chain, tag);
}
-void RGWRados::delete_objs_inline(cls_rgw_obj_chain& chain, const string& tag)
+void RGWRados::delete_objs_inline(const DoutPrefixProvider *dpp, cls_rgw_obj_chain& chain, const string& tag)
{
string last_pool;
std::unique_ptr<IoCtx> ctx(new IoCtx);
cls_rgw_obj& obj = *liter;
if (obj.pool != last_pool) {
ctx.reset(new IoCtx);
- ret = rgw_init_ioctx(get_rados_handle(), obj.pool, *ctx);
+ ret = rgw_init_ioctx(dpp, get_rados_handle(), obj.pool, *ctx);
if (ret < 0) {
last_pool = "";
- ldout(cct, 0) << "ERROR: failed to create ioctx pool=" <<
+ ldpp_dout(dpp, 0) << "ERROR: failed to create ioctx pool=" <<
obj.pool << dendl;
continue;
}
}
ctx->locator_set_key(obj.loc);
const string& oid = obj.key.name; /* just stored raw oid there */
- ldout(cct, 5) << "delete_objs_inline: removing " << obj.pool <<
+ ldpp_dout(dpp, 5) << "delete_objs_inline: removing " << obj.pool <<
":" << obj.key.name << dendl;
ObjectWriteOperation op;
cls_refcount_put(op, tag, true);
ret = ctx->operate(oid, &op);
if (ret < 0) {
- ldout(cct, 5) << "delete_objs_inline: refcount put returned error " << ret << dendl;
+ ldpp_dout(dpp, 5) << "delete_objs_inline: refcount put returned error " << ret << dendl;
}
}
}
}
}
-int RGWRados::bucket_check_index(RGWBucketInfo& bucket_info,
+int RGWRados::bucket_check_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info,
map<RGWObjCategory, RGWStorageStats> *existing_stats,
map<RGWObjCategory, RGWStorageStats> *calculated_stats)
{
map<int, string> oids;
map<int, struct rgw_cls_check_index_ret> bucket_objs_ret;
- int ret = svc.bi_rados->open_bucket_index(bucket_info, std::nullopt, &index_pool, &oids, nullptr);
+ int ret = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, &index_pool, &oids, nullptr);
if (ret < 0) {
return ret;
}
return 0;
}
-int RGWRados::bucket_rebuild_index(RGWBucketInfo& bucket_info)
+int RGWRados::bucket_rebuild_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info)
{
RGWSI_RADOS::Pool index_pool;
map<int, string> bucket_objs;
- int r = svc.bi_rados->open_bucket_index(bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr);
+ int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr);
if (r < 0) {
return r;
}
return CLSRGWIssueBucketRebuild(index_pool.ioctx(), bucket_objs, cct->_conf->rgw_bucket_index_max_aio)();
}
-int RGWRados::bucket_set_reshard(const RGWBucketInfo& bucket_info, const cls_rgw_bucket_instance_entry& entry)
+int RGWRados::bucket_set_reshard(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const cls_rgw_bucket_instance_entry& entry)
{
RGWSI_RADOS::Pool index_pool;
map<int, string> bucket_objs;
- int r = svc.bi_rados->open_bucket_index(bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr);
+ int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr);
if (r < 0) {
return r;
}
ldpp_dout(dpp, 0) << "defer chain tag=" << tag << dendl;
cls_rgw_obj_chain chain;
- update_gc_chain(state->obj, *state->manifest, &chain);
+ update_gc_chain(dpp, state->obj, *state->manifest, &chain);
return gc->async_defer_chain(tag, chain);
}
} else {
rgw_bucket_dir_entry dirent;
- int r = store->bi_get_instance(target->get_bucket_info(), obj, &dirent);
+ int r = store->bi_get_instance(dpp, target->get_bucket_info(), obj, &dirent);
if (r < 0) {
return r;
}
BucketShard *bs;
int r = target->get_bucket_shard(&bs, dpp);
if (r < 0) {
- ldout(store->ctx(), 5) << "failed to get BucketShard object: r=" << r << dendl;
+ ldpp_dout(dpp, 5) << "failed to get BucketShard object: r=" << r << dendl;
return r;
}
r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info, bs->shard_id);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed writing data log" << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl;
return r;
}
}
rgw_rados_ref ref;
- int r = store->get_obj_head_ref(target->get_bucket_info(), obj, &ref);
+ int r = store->get_obj_head_ref(dpp, target->get_bucket_info(), obj, &ref);
if (r < 0) {
return r;
}
store->remove_rgw_head_obj(op);
auto& ioctx = ref.pool.ioctx();
- r = rgw_rados_operate(ioctx, ref.obj.oid, &op, null_yield);
+ r = rgw_rados_operate(dpp, ioctx, ref.obj.oid, &op, null_yield);
/* raced with another operation, object state is indeterminate */
const bool need_invalidate = (r == -ECANCELED);
}
r = index_op.complete_del(dpp, poolid, ioctx.get_last_version(), state->mtime, params.remove_objs);
- int ret = target->complete_atomic_modification();
+ int ret = target->complete_atomic_modification(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: complete_atomic_modification returned ret=" << ret << dendl;
}
} else {
int ret = index_op.cancel(dpp);
if (ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: index_op.cancel() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: index_op.cancel() returned ret=" << ret << dendl;
}
}
return del_op.delete_obj(null_yield, dpp);
}
-int RGWRados::delete_raw_obj(const rgw_raw_obj& obj)
+int RGWRados::delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj)
{
rgw_rados_ref ref;
- int r = get_raw_obj_ref(obj, &ref);
+ int r = get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
return r;
}
ObjectWriteOperation op;
op.remove();
- r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
+ r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
if (r < 0)
return r;
RGWBucketInfo bucket_info;
int ret = get_bucket_instance_info(obj_ctx, obj.bucket, bucket_info, NULL, NULL, null_yield, dpp);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: " << __func__ << "() get_bucket_instance_info(bucket=" << obj.bucket << ") returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "() get_bucket_instance_info(bucket=" << obj.bucket << ") returned ret=" << ret << dendl;
return ret;
}
return index_op.complete_del(dpp, -1 /* pool */, 0, mtime, NULL);
}
-static void generate_fake_tag(rgw::sal::Store* store, map<string, bufferlist>& attrset, RGWObjManifest& manifest, bufferlist& manifest_bl, bufferlist& tag_bl)
+static void generate_fake_tag(const DoutPrefixProvider *dpp, rgw::sal::Store* store, map<string, bufferlist>& attrset, RGWObjManifest& manifest, bufferlist& manifest_bl, bufferlist& tag_bl)
{
string tag;
- RGWObjManifest::obj_iterator mi = manifest.obj_begin();
- if (mi != manifest.obj_end()) {
+ RGWObjManifest::obj_iterator mi = manifest.obj_begin(dpp);
+ if (mi != manifest.obj_end(dpp)) {
if (manifest.has_tail()) // first object usually points at the head, let's skip to a more unique part
++mi;
tag = mi.get_location().get_raw_obj(store).oid;
int r = -ENOENT;
if (!assume_noent) {
- r = RGWRados::raw_obj_stat(raw_obj, &s->size, &s->mtime, &s->epoch, &s->attrset, (s->prefetch_data ? &s->data : NULL), NULL, y);
+ r = RGWRados::raw_obj_stat(dpp, raw_obj, &s->size, &s->mtime, &s->epoch, &s->attrset, (s->prefetch_data ? &s->data : NULL), NULL, y);
}
if (r == -ENOENT) {
if (cct->_conf->subsys.should_gather<ceph_subsys_rgw, 20>() && \
s->manifest->has_explicit_objs()) {
RGWObjManifest::obj_iterator mi;
- for (mi = s->manifest->obj_begin(); mi != s->manifest->obj_end(); ++mi) {
+ for (mi = s->manifest->obj_begin(dpp); mi != s->manifest->obj_end(dpp); ++mi) {
ldpp_dout(dpp, 20) << "manifest: ofs=" << mi.get_ofs() << " loc=" << mi.get_location().get_raw_obj(store) << dendl;
}
}
* Uh oh, something's wrong, object with manifest should have tag. Let's
* create one out of the manifest, would be unique
*/
- generate_fake_tag(store, s->attrset, *s->manifest, manifest_bl, s->obj_tag);
+ generate_fake_tag(dpp, store, s->attrset, *s->manifest, manifest_bl, s->obj_tag);
s->fake_tag = true;
}
}
if (is_olh(s->attrset)) {
s->is_olh = true;
- ldout(cct, 20) << __func__ << ": setting s->olh_tag to " << string(s->olh_tag.c_str(), s->olh_tag.length()) << dendl;
+ ldpp_dout(dpp, 20) << __func__ << ": setting s->olh_tag to " << string(s->olh_tag.c_str(), s->olh_tag.length()) << dendl;
if (need_follow_olh) {
return get_olh_target_state(dpp, *rctx, bucket_info, obj, s, state, y);
return 0;
}
-int RGWRados::Object::Stat::stat_async()
+int RGWRados::Object::Stat::stat_async(const DoutPrefixProvider *dpp)
{
RGWObjectCtx& ctx = source->get_ctx();
rgw_obj& obj = source->get_obj();
string loc;
get_obj_bucket_and_oid_loc(obj, oid, loc);
- int r = store->get_obj_head_ioctx(source->get_bucket_info(), obj, &state.io_ctx);
+ int r = store->get_obj_head_ioctx(dpp, source->get_bucket_info(), obj, &state.io_ctx);
if (r < 0) {
return r;
}
state.io_ctx.locator_set_key(loc);
r = state.io_ctx.aio_operate(oid, state.completion, &op, NULL);
if (r < 0) {
- ldout(store->ctx(), 5) << __func__
+ ldpp_dout(dpp, 5) << __func__
<< ": ERROR: aio_operate() returned ret=" << r
<< dendl;
return r;
if (r < 0)
return r;
- return append_atomic_test(*pstate, op);
+ return append_atomic_test(dpp, *pstate, op);
}
-int RGWRados::append_atomic_test(const RGWObjState* state,
+int RGWRados::append_atomic_test(const DoutPrefixProvider *dpp,
+ const RGWObjState* state,
librados::ObjectOperation& op)
{
if (!state->is_atomic) {
- ldout(cct, 20) << "state for obj=" << state->obj << " is not atomic, not appending atomic test" << dendl;
+ ldpp_dout(dpp, 20) << "state for obj=" << state->obj << " is not atomic, not appending atomic test" << dendl;
return 0;
}
if (state->obj_tag.length() > 0 && !state->fake_tag) {// check for backward compatibility
op.cmpxattr(RGW_ATTR_ID_TAG, LIBRADOS_CMPXATTR_OP_EQ, state->obj_tag);
} else {
- ldout(cct, 20) << "state->obj_tag is empty, not appending atomic test" << dendl;
+ ldpp_dout(dpp, 20) << "state->obj_tag is empty, not appending atomic test" << dendl;
}
return 0;
}
}
rgw_rados_ref ref;
- int r = get_obj_head_ref(bucket_info, obj, &ref);
+ int r = get_obj_head_ref(dpp, bucket_info, obj, &ref);
if (r < 0) {
return r;
}
rgw_obj_index_key obj_key;
obj.key.get_index_key(&obj_key);
- obj_expirer->hint_add(ts, bucket.tenant, bucket.name, bucket.bucket_id, obj_key);
+ obj_expirer->hint_add(dpp, ts, bucket.tenant, bucket.name, bucket.bucket_id, obj_key);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode " RGW_ATTR_DELETE_AT << " attr" << dendl;
}
struct timespec mtime_ts = real_clock::to_timespec(mtime);
op.mtime2(&mtime_ts);
auto& ioctx = ref.pool.ioctx();
- r = rgw_rados_operate(ioctx, ref.obj.oid, &op, null_yield);
+ r = rgw_rados_operate(dpp, ioctx, ref.obj.oid, &op, null_yield);
if (state) {
if (r >= 0) {
bufferlist acl_bl = attrs[RGW_ATTR_ACL];
} else {
int ret = index_op.cancel(dpp);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: complete_update_index_cancel() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: complete_update_index_cancel() returned ret=" << ret << dendl;
}
}
}
state.cur_pool = state.head_obj.pool;
state.cur_ioctx = &state.io_ctxs[state.cur_pool];
- r = store->get_obj_head_ioctx(bucket_info, state.obj, state.cur_ioctx);
+ r = store->get_obj_head_ioctx(dpp, bucket_info, state.obj, state.cur_ioctx);
if (r < 0) {
return r;
}
for (int i = 0; i < NUM_RESHARD_RETRIES; ++i) {
int ret = get_bucket_shard(&bs, dpp);
if (ret < 0) {
- ldout(store->ctx(), 5) << "failed to get BucketShard object: ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << "failed to get BucketShard object: ret=" << ret << dendl;
return ret;
}
r = call(bs);
if (r != -ERR_BUSY_RESHARDING) {
break;
}
- ldout(store->ctx(), 0) << "NOTICE: resharding operation on bucket index detected, blocking" << dendl;
+ ldpp_dout(dpp, 0) << "NOTICE: resharding operation on bucket index detected, blocking" << dendl;
string new_bucket_id;
r = store->block_while_resharding(bs, &new_bucket_id,
target->bucket_info, null_yield, dpp);
if (r < 0) {
return r;
}
- ldout(store->ctx(), 20) << "reshard completion identified, new_bucket_id=" << new_bucket_id << dendl;
+ ldpp_dout(dpp, 20) << "reshard completion identified, new_bucket_id=" << new_bucket_id << dendl;
i = 0; /* resharding is finished, make sure we can retry */
r = target->update_bucket_id(new_bucket_id, dpp);
if (r < 0) {
- ldout(store->ctx(), 0) << "ERROR: update_bucket_id() new_bucket_id=" << new_bucket_id << " returned r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: update_bucket_id() new_bucket_id=" << new_bucket_id << " returned r=" << r << dendl;
return r;
}
invalidate_bs();
}
int r = guard_reshard(dpp, nullptr, [&](BucketShard *bs) -> int {
- return store->cls_obj_prepare_op(*bs, op, optag, obj, bilog_flags, y, zones_trace);
+ return store->cls_obj_prepare_op(dpp, *bs, op, optag, obj, bilog_flags, y, zones_trace);
});
if (r < 0) {
int ret = get_bucket_shard(&bs, dpp);
if (ret < 0) {
- ldout(store->ctx(), 5) << "failed to get BucketShard object: ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << "failed to get BucketShard object: ret=" << ret << dendl;
return ret;
}
int r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info, bs->shard_id);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed writing data log" << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl;
}
return ret;
int ret = get_bucket_shard(&bs, dpp);
if (ret < 0) {
- ldout(store->ctx(), 5) << "failed to get BucketShard object: ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << "failed to get BucketShard object: ret=" << ret << dendl;
return ret;
}
int r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info, bs->shard_id);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed writing data log" << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl;
}
return ret;
*/
int r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info, bs->shard_id);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed writing data log" << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl;
}
return ret;
int RGWRados::Object::Read::read(int64_t ofs, int64_t end, bufferlist& bl, optional_yield y, const DoutPrefixProvider *dpp)
{
RGWRados *store = source->get_store();
- CephContext *cct = store->ctx();
rgw_raw_obj read_obj;
uint64_t read_ofs = ofs;
if (astate->manifest && astate->manifest->has_tail()) {
/* now get the relevant object part */
- RGWObjManifest::obj_iterator iter = astate->manifest->obj_find(ofs);
+ RGWObjManifest::obj_iterator iter = astate->manifest->obj_find(dpp, ofs);
uint64_t stripe_ofs = iter.get_stripe_ofs();
read_obj = iter.get_location().get_raw_obj(store->store);
auto iter = state.io_ctxs.find(read_obj.pool);
if (iter == state.io_ctxs.end()) {
state.cur_ioctx = &state.io_ctxs[read_obj.pool];
- r = store->open_pool_ctx(read_obj.pool, *state.cur_ioctx, false);
+ r = store->open_pool_ctx(dpp, read_obj.pool, *state.cur_ioctx, false);
if (r < 0) {
- ldout(cct, 20) << "ERROR: failed to open pool context for pool=" << read_obj.pool << " r=" << r << dendl;
+ ldpp_dout(dpp, 20) << "ERROR: failed to open pool context for pool=" << read_obj.pool << " r=" << r << dendl;
return r;
}
} else {
}
};
-static int _get_obj_iterate_cb(const rgw_raw_obj& read_obj, off_t obj_ofs,
+static int _get_obj_iterate_cb(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& read_obj, off_t obj_ofs,
off_t read_ofs, off_t len, bool is_head_obj,
RGWObjState *astate, void *arg)
{
struct get_obj_data *d = (struct get_obj_data *)arg;
- return d->store->get_obj_iterate_cb(read_obj, obj_ofs, read_ofs, len,
+ return d->store->get_obj_iterate_cb(dpp, read_obj, obj_ofs, read_ofs, len,
is_head_obj, astate, arg);
}
-int RGWRados::get_obj_iterate_cb(const rgw_raw_obj& read_obj, off_t obj_ofs,
+int RGWRados::get_obj_iterate_cb(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& read_obj, off_t obj_ofs,
off_t read_ofs, off_t len, bool is_head_obj,
RGWObjState *astate, void *arg)
{
if (is_head_obj) {
/* only when reading from the head object do we need to do the atomic test */
- int r = append_atomic_test(astate, op);
+ int r = append_atomic_test(dpp, astate, op);
if (r < 0)
return r;
}
auto obj = d->store->svc.rados->obj(read_obj);
- int r = obj.open();
+ int r = obj.open(dpp);
if (r < 0) {
- ldout(cct, 4) << "failed to open rados context for " << read_obj << dendl;
+ ldpp_dout(dpp, 4) << "failed to open rados context for " << read_obj << dendl;
return r;
}
- ldout(cct, 20) << "rados->get_obj_iterate_cb oid=" << read_obj.oid << " obj-ofs=" << obj_ofs << " read_ofs=" << read_ofs << " len=" << len << dendl;
+ ldpp_dout(dpp, 20) << "rados->get_obj_iterate_cb oid=" << read_obj.oid << " obj-ofs=" << obj_ofs << " read_ofs=" << read_ofs << " len=" << len << dendl;
op.read(read_ofs, len, nullptr, nullptr);
const uint64_t cost = len;
int r = store->iterate_obj(dpp, obj_ctx, source->get_bucket_info(), state.obj,
ofs, end, chunk_size, _get_obj_iterate_cb, &data, y);
if (r < 0) {
- ldout(cct, 0) << "iterate_obj() failed with " << r << dendl;
+ ldpp_dout(dpp, 0) << "iterate_obj() failed with " << r << dendl;
data.cancel(); // drain completions without writing back to client
return r;
}
if (astate->manifest) {
/* now get the relevant object stripe */
- RGWObjManifest::obj_iterator iter = astate->manifest->obj_find(ofs);
+ RGWObjManifest::obj_iterator iter = astate->manifest->obj_find(dpp, ofs);
- RGWObjManifest::obj_iterator obj_end = astate->manifest->obj_end();
+ RGWObjManifest::obj_iterator obj_end = astate->manifest->obj_end(dpp);
for (; iter != obj_end && ofs <= end; ++iter) {
off_t stripe_ofs = iter.get_stripe_ofs();
}
reading_from_head = (read_obj == head_obj);
- r = cb(read_obj, ofs, read_ofs, read_len, reading_from_head, astate, arg);
+ r = cb(dpp, read_obj, ofs, read_ofs, read_len, reading_from_head, astate, arg);
if (r < 0) {
return r;
}
read_obj = head_obj;
uint64_t read_len = std::min(len, max_chunk_size);
- r = cb(read_obj, ofs, ofs, read_len, reading_from_head, astate, arg);
+ r = cb(dpp, read_obj, ofs, ofs, read_len, reading_from_head, astate, arg);
if (r < 0) {
return r;
}
return 0;
}
-int RGWRados::obj_operate(const RGWBucketInfo& bucket_info, const rgw_obj& obj, ObjectWriteOperation *op)
+int RGWRados::obj_operate(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, ObjectWriteOperation *op)
{
rgw_rados_ref ref;
- int r = get_obj_head_ref(bucket_info, obj, &ref);
+ int r = get_obj_head_ref(dpp, bucket_info, obj, &ref);
if (r < 0) {
return r;
}
- return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, op, null_yield);
+ return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, op, null_yield);
}
-int RGWRados::obj_operate(const RGWBucketInfo& bucket_info, const rgw_obj& obj, ObjectReadOperation *op)
+int RGWRados::obj_operate(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, ObjectReadOperation *op)
{
rgw_rados_ref ref;
- int r = get_obj_head_ref(bucket_info, obj, &ref);
+ int r = get_obj_head_ref(dpp, bucket_info, obj, &ref);
if (r < 0) {
return r;
}
bufferlist outbl;
- return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, op, &outbl, null_yield);
+ return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, op, &outbl, null_yield);
}
-int RGWRados::olh_init_modification_impl(const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, string *op_tag)
+int RGWRados::olh_init_modification_impl(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, string *op_tag)
{
ObjectWriteOperation op;
*/
if (has_tag) {
/* guard against racing writes */
- bucket_index_guard_olh_op(state, op);
+ bucket_index_guard_olh_op(dpp, state, op);
}
if (!has_tag) {
op.setxattr(attr_name.c_str(), bl);
- int ret = obj_operate(bucket_info, olh_obj, &op);
+ int ret = obj_operate(dpp, bucket_info, olh_obj, &op);
if (ret < 0) {
return ret;
}
return 0;
}
-int RGWRados::olh_init_modification(const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& obj, string *op_tag)
+int RGWRados::olh_init_modification(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& obj, string *op_tag)
{
int ret;
- ret = olh_init_modification_impl(bucket_info, state, obj, op_tag);
+ ret = olh_init_modification_impl(dpp, bucket_info, state, obj, op_tag);
if (ret == -EEXIST) {
ret = -ECANCELED;
}
for (int i = 0; i < NUM_RESHARD_RETRIES; ++i) {
r = bs->init(pobj->bucket, *pobj, nullptr /* no RGWBucketInfo */, dpp);
if (r < 0) {
- ldout(cct, 5) << "bs.init() returned ret=" << r << dendl;
+ ldpp_dout(dpp, 5) << "bs.init() returned ret=" << r << dendl;
return r;
}
r = call(bs);
if (r != -ERR_BUSY_RESHARDING) {
break;
}
- ldout(cct, 0) << "NOTICE: resharding operation on bucket index detected, blocking" << dendl;
+ ldpp_dout(dpp, 0) << "NOTICE: resharding operation on bucket index detected, blocking" << dendl;
string new_bucket_id;
r = block_while_resharding(bs, &new_bucket_id, bucket_info, null_yield, dpp);
if (r == -ERR_BUSY_RESHARDING) {
RGWBucketInfo fresh_bucket_info = bucket_info;
int ret = try_refresh_bucket_info(fresh_bucket_info, nullptr, dpp);
if (ret < 0) {
- ldout(cct, 0) << __func__ <<
+ ldpp_dout(dpp, 0) << __func__ <<
" ERROR: failed to refresh bucket info after reshard at " <<
log_tag << ": " << cpp_strerror(-ret) << dendl;
return ret;
ldpp_dout(dpp, 10) << __func__ <<
" INFO: was able to take reshard lock for bucket " <<
bucket_id << dendl;
- ret = RGWBucketReshard::clear_resharding(this->store, bucket_info);
+ ret = RGWBucketReshard::clear_resharding(dpp, this->store, bucket_info);
if (ret < 0) {
reshard_lock.unlock();
ldpp_dout(dpp, 0) << __func__ <<
rgw_zone_set *_zones_trace, bool log_data_change)
{
rgw_rados_ref ref;
- int r = get_obj_head_ref(bucket_info, obj_instance, &ref);
+ int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref);
if (r < 0) {
return r;
}
delete_marker, op_tag, meta, olh_epoch,
unmod_since, high_precision_time,
svc.zone->get_zone().log_data, zones_trace);
- return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
+ return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
});
if (r < 0) {
ldpp_dout(dpp, 20) << "rgw_rados_operate() after cls_rgw_bucket_link_olh() returned r=" << r << dendl;
r = svc.datalog_rados->add_entry(dpp, bucket_info, bs.shard_id);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed writing data log" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed writing data log" << dendl;
}
return 0;
}
-void RGWRados::bucket_index_guard_olh_op(RGWObjState& olh_state, ObjectOperation& op)
+void RGWRados::bucket_index_guard_olh_op(const DoutPrefixProvider *dpp, RGWObjState& olh_state, ObjectOperation& op)
{
- ldout(cct, 20) << __func__ << "(): olh_state.olh_tag=" << string(olh_state.olh_tag.c_str(), olh_state.olh_tag.length()) << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): olh_state.olh_tag=" << string(olh_state.olh_tag.c_str(), olh_state.olh_tag.length()) << dendl;
op.cmpxattr(RGW_ATTR_OLH_ID_TAG, CEPH_OSD_CMPXATTR_OP_EQ, olh_state.olh_tag);
}
const string& op_tag, const string& olh_tag, uint64_t olh_epoch, rgw_zone_set *_zones_trace)
{
rgw_rados_ref ref;
- int r = get_obj_head_ref(bucket_info, obj_instance, &ref);
+ int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref);
if (r < 0) {
return r;
}
cls_rgw_guard_bucket_resharding(op, -ERR_BUSY_RESHARDING);
cls_rgw_bucket_unlink_instance(op, key, op_tag,
olh_tag, olh_epoch, svc.zone->get_zone().log_data, zones_trace);
- return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
+ return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
});
if (r < 0) {
ldpp_dout(dpp, 20) << "rgw_rados_operate() after cls_rgw_bucket_link_instance() returned r=" << r << dendl;
bool *is_truncated)
{
rgw_rados_ref ref;
- int r = get_obj_head_ref(bucket_info, obj_instance, &ref);
+ int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref);
if (r < 0) {
return r;
}
int ret =
bs.init(obj_instance.bucket, obj_instance, nullptr /* no RGWBucketInfo */, dpp);
if (ret < 0) {
- ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl;
return ret;
}
int op_ret = 0;
cls_rgw_get_olh_log(op, key, ver_marker, olh_tag, log_ret, op_ret);
bufferlist outbl;
- int r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, &outbl, null_yield);
+ int r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, &outbl, null_yield);
if (r < 0) {
return r;
}
// the attributes from another zone, causing link_olh() to fail endlessly due to
// olh_tag mismatch. this attempts to detect this case and reconstruct the OLH
// attributes from the bucket index. see http://tracker.ceph.com/issues/37792
-int RGWRados::repair_olh(RGWObjState* state, const RGWBucketInfo& bucket_info,
+int RGWRados::repair_olh(const DoutPrefixProvider *dpp, RGWObjState* state, const RGWBucketInfo& bucket_info,
const rgw_obj& obj)
{
// fetch the current olh entry from the bucket index
rgw_bucket_olh_entry olh;
- int r = bi_get_olh(bucket_info, obj, &olh);
+ int r = bi_get_olh(dpp, bucket_info, obj, &olh);
if (r < 0) {
- ldout(cct, 0) << "repair_olh failed to read olh entry for " << obj << dendl;
+ ldpp_dout(dpp, 0) << "repair_olh failed to read olh entry for " << obj << dendl;
return r;
}
if (olh.tag == rgw_bl_str(state->olh_tag)) { // mismatch already resolved?
return 0;
}
- ldout(cct, 4) << "repair_olh setting olh_tag=" << olh.tag
+ ldpp_dout(dpp, 4) << "repair_olh setting olh_tag=" << olh.tag
<< " key=" << olh.key << " delete_marker=" << olh.delete_marker << dendl;
// rewrite OLH_ID_TAG and OLH_INFO from current olh
ObjectWriteOperation op;
// assert this is the same olh tag we think we're fixing
- bucket_index_guard_olh_op(*state, op);
+ bucket_index_guard_olh_op(dpp, *state, op);
// preserve existing mtime
struct timespec mtime_ts = ceph::real_clock::to_timespec(state->mtime);
op.mtime2(&mtime_ts);
op.setxattr(RGW_ATTR_OLH_INFO, bl);
}
rgw_rados_ref ref;
- r = get_obj_head_ref(bucket_info, obj, &ref);
+ r = get_obj_head_ref(dpp, bucket_info, obj, &ref);
if (r < 0) {
return r;
}
- r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
+ r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
if (r < 0) {
- ldout(cct, 0) << "repair_olh failed to write olh attributes with "
+ ldpp_dout(dpp, 0) << "repair_olh failed to write olh attributes with "
<< cpp_strerror(r) << dendl;
return r;
}
int RGWRados::bucket_index_trim_olh_log(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& obj_instance, uint64_t ver)
{
rgw_rados_ref ref;
- int r = get_obj_head_ref(bucket_info, obj_instance, &ref);
+ int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref);
if (r < 0) {
return r;
}
int ret =
bs.init(obj_instance.bucket, obj_instance, nullptr /* no RGWBucketInfo */, dpp);
if (ret < 0) {
- ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl;
return ret;
}
ObjectWriteOperation op;
cls_rgw_guard_bucket_resharding(op, -ERR_BUSY_RESHARDING);
cls_rgw_trim_olh_log(op, key, ver, olh_tag);
- return pbs->bucket_obj.operate(&op, null_yield);
+ return pbs->bucket_obj.operate(dpp, &op, null_yield);
});
if (ret < 0) {
ldpp_dout(dpp, 20) << "cls_rgw_trim_olh_log() returned r=" << ret << dendl;
int RGWRados::bucket_index_clear_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& obj_instance)
{
rgw_rados_ref ref;
- int r = get_obj_head_ref(bucket_info, obj_instance, &ref);
+ int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref);
if (r < 0) {
return r;
}
auto& ref = pbs->bucket_obj.get_ref();
cls_rgw_guard_bucket_resharding(op, -ERR_BUSY_RESHARDING);
cls_rgw_clear_olh(op, key, olh_tag);
- return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
+ return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
});
if (ret < 0) {
ldpp_dout(dpp, 5) << "rgw_rados_operate() after cls_rgw_clear_olh() returned ret=" << ret << dendl;
}
rgw_rados_ref ref;
- int r = get_obj_head_ref(bucket_info, obj, &ref);
+ int r = get_obj_head_ref(dpp, bucket_info, obj, &ref);
if (r < 0) {
return r;
}
rgw_obj obj_instance(bucket, key);
int ret = delete_obj(dpp, obj_ctx, bucket_info, obj_instance, 0, RGW_BILOG_FLAG_VERSIONED_OP, ceph::real_time(), zones_trace);
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << "ERROR: delete_obj() returned " << ret << " obj_instance=" << obj_instance << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: delete_obj() returned " << ret << " obj_instance=" << obj_instance << dendl;
return ret;
}
}
/* update olh object */
- r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
+ r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
if (r == -ECANCELED) {
r = 0;
}
if (r < 0) {
- ldout(cct, 0) << "ERROR: could not apply olh update, r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not apply olh update, r=" << r << dendl;
return r;
}
r = bucket_index_trim_olh_log(dpp, bucket_info, state, obj, last_ver);
if (r < 0) {
- ldout(cct, 0) << "ERROR: could not trim olh log, r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not trim olh log, r=" << r << dendl;
return r;
}
cls_obj_check_prefix_exist(rm_op, RGW_ATTR_OLH_PENDING_PREFIX, true); /* fail if found one of these, pending modification */
rm_op.remove();
- r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &rm_op, null_yield);
+ r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &rm_op, null_yield);
if (r == -ECANCELED) {
return 0; /* someone else won this race */
} else {
*/
r = bucket_index_clear_olh(dpp, bucket_info, state, obj);
if (r < 0) {
- ldout(cct, 0) << "ERROR: could not clear bucket index olh entries r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not clear bucket index olh entries r=" << r << dendl;
return r;
}
}
return ret;
}
- ret = olh_init_modification(bucket_info, *state, olh_obj, &op_tag);
+ ret = olh_init_modification(dpp, bucket_info, *state, olh_obj, &op_tag);
if (ret < 0) {
- ldout(cct, 20) << "olh_init_modification() target_obj=" << target_obj << " delete_marker=" << (int)delete_marker << " returned " << ret << dendl;
+ ldpp_dout(dpp, 20) << "olh_init_modification() target_obj=" << target_obj << " delete_marker=" << (int)delete_marker << " returned " << ret << dendl;
if (ret == -ECANCELED) {
continue;
}
if (ret == -ECANCELED) {
// the bucket index rejected the link_olh() due to olh tag mismatch;
// attempt to reconstruct olh head attributes based on the bucket index
- int r2 = repair_olh(state, bucket_info, olh_obj);
+ int r2 = repair_olh(dpp, state, bucket_info, olh_obj);
if (r2 < 0 && r2 != -ECANCELED) {
return r2;
}
}
if (i == MAX_ECANCELED_RETRY) {
- ldout(cct, 0) << "ERROR: exceeded max ECANCELED retries, aborting (EIO)" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: exceeded max ECANCELED retries, aborting (EIO)" << dendl;
return -EIO;
}
if (ret < 0)
return ret;
- ret = olh_init_modification(bucket_info, *state, olh_obj, &op_tag);
+ ret = olh_init_modification(dpp, bucket_info, *state, olh_obj, &op_tag);
if (ret < 0) {
- ldout(cct, 20) << "olh_init_modification() target_obj=" << target_obj << " returned " << ret << dendl;
+ ldpp_dout(dpp, 20) << "olh_init_modification() target_obj=" << target_obj << " returned " << ret << dendl;
if (ret == -ECANCELED) {
continue;
}
ret = bucket_index_unlink_instance(dpp, bucket_info, target_obj, op_tag, olh_tag, olh_epoch, zones_trace);
if (ret < 0) {
- ldout(cct, 20) << "bucket_index_unlink_instance() target_obj=" << target_obj << " returned " << ret << dendl;
+ ldpp_dout(dpp, 20) << "bucket_index_unlink_instance() target_obj=" << target_obj << " returned " << ret << dendl;
if (ret == -ECANCELED) {
continue;
}
}
if (i == MAX_ECANCELED_RETRY) {
- ldout(cct, 0) << "ERROR: exceeded max ECANCELED retries, aborting (EIO)" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: exceeded max ECANCELED retries, aborting (EIO)" << dendl;
return -EIO;
}
gen_rand_obj_instance_name(&target_obj->key);
}
-int RGWRados::get_olh(const RGWBucketInfo& bucket_info, const rgw_obj& obj, RGWOLHInfo *olh)
+int RGWRados::get_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, RGWOLHInfo *olh)
{
map<string, bufferlist> attrset;
ObjectReadOperation op;
op.getxattrs(&attrset, NULL);
- int r = obj_operate(bucket_info, obj, &op);
+ int r = obj_operate(dpp, bucket_info, obj, &op);
if (r < 0) {
return r;
}
}
}
-int RGWRados::remove_olh_pending_entries(const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, map<string, bufferlist>& pending_attrs)
+int RGWRados::remove_olh_pending_entries(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, map<string, bufferlist>& pending_attrs)
{
rgw_rados_ref ref;
- int r = get_obj_head_ref(bucket_info, olh_obj, &ref);
+ int r = get_obj_head_ref(dpp, bucket_info, olh_obj, &ref);
if (r < 0) {
return r;
}
auto i = pending_attrs.begin();
while (i != pending_attrs.end()) {
ObjectWriteOperation op;
- bucket_index_guard_olh_op(state, op);
+ bucket_index_guard_olh_op(dpp, state, op);
for (int n = 0; n < max_entries && i != pending_attrs.end(); ++n, ++i) {
op.rmxattr(i->first.c_str());
}
- r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
+ r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
if (r == -ENOENT || r == -ECANCELED) {
/* raced with some other change, shouldn't sweat about it */
return 0;
}
if (r < 0) {
- ldout(cct, 0) << "ERROR: could not apply olh update, r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not apply olh update, r=" << r << dendl;
return r;
}
}
check_pending_olh_entries(pending_entries, &rm_pending_entries);
if (!rm_pending_entries.empty()) {
- int ret = remove_olh_pending_entries(bucket_info, *state, olh_obj, rm_pending_entries);
+ int ret = remove_olh_pending_entries(dpp, bucket_info, *state, olh_obj, rm_pending_entries);
if (ret < 0) {
- ldout(cct, 20) << "ERROR: rm_pending_entries returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 20) << "ERROR: rm_pending_entries returned ret=" << ret << dendl;
return ret;
}
}
if (!pending_entries.empty()) {
- ldout(cct, 20) << __func__ << "(): found pending entries, need to update_olh() on bucket=" << olh_obj.bucket << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): found pending entries, need to update_olh() on bucket=" << olh_obj.bucket << dendl;
int ret = update_olh(dpp, obj_ctx, state, bucket_info, olh_obj);
if (ret < 0) {
return 0;
}
-int RGWRados::raw_obj_stat(rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch,
+int RGWRados::raw_obj_stat(const DoutPrefixProvider *dpp,
+ rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch,
map<string, bufferlist> *attrs, bufferlist *first_chunk,
RGWObjVersionTracker *objv_tracker, optional_yield y)
{
rgw_rados_ref ref;
- int r = get_raw_obj_ref(obj, &ref);
+ int r = get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
return r;
}
op.read(0, cct->_conf->rgw_max_chunk_size, first_chunk, NULL);
}
bufferlist outbl;
- r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, &outbl, null_yield);
+ r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, &outbl, null_yield);
if (epoch) {
*epoch = ref.pool.ioctx().get_last_version();
return 0;
}
-int RGWRados::get_bucket_stats(RGWBucketInfo& bucket_info, int shard_id, string *bucket_ver, string *master_ver,
+int RGWRados::get_bucket_stats(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, int shard_id, string *bucket_ver, string *master_ver,
map<RGWObjCategory, RGWStorageStats>& stats, string *max_marker, bool *syncstopped)
{
vector<rgw_bucket_dir_header> headers;
map<int, string> bucket_instance_ids;
- int r = cls_bucket_head(bucket_info, shard_id, headers, &bucket_instance_ids);
+ int r = cls_bucket_head(dpp, bucket_info, shard_id, headers, &bucket_instance_ids);
if (r < 0) {
return r;
}
}
};
-int RGWRados::get_bucket_stats_async(RGWBucketInfo& bucket_info, int shard_id, RGWGetBucketStats_CB *ctx)
+int RGWRados::get_bucket_stats_async(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, int shard_id, RGWGetBucketStats_CB *ctx)
{
int num_aio = 0;
RGWGetBucketStatsContext *get_ctx = new RGWGetBucketStatsContext(ctx, bucket_info.layout.current_index.layout.normal.num_shards ? : 1);
ceph_assert(get_ctx);
- int r = cls_bucket_head_async(bucket_info, shard_id, get_ctx, &num_aio);
+ int r = cls_bucket_head_async(dpp, bucket_info, shard_id, get_ctx, &num_aio);
if (r < 0) {
ctx->put();
if (num_aio) {
return ret;
}
- int r = cls_bucket_head(bucket_info, RGW_NO_SHARD, headers);
+ int r = cls_bucket_head(dpp, bucket_info, RGW_NO_SHARD, headers);
if (r < 0)
return r;
return m.size();
}
-int RGWRados::append_async(rgw_raw_obj& obj, size_t size, bufferlist& bl)
+int RGWRados::append_async(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, size_t size, bufferlist& bl)
{
rgw_rados_ref ref;
- int r = get_raw_obj_ref(obj, &ref);
+ int r = get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
return r;
}
return r;
}
-int RGWRados::pool_iterate_begin(const rgw_pool& pool, RGWPoolIterCtx& ctx)
+int RGWRados::pool_iterate_begin(const DoutPrefixProvider *dpp, const rgw_pool& pool, RGWPoolIterCtx& ctx)
{
librados::IoCtx& io_ctx = ctx.io_ctx;
librados::NObjectIterator& iter = ctx.iter;
- int r = open_pool_ctx(pool, io_ctx, false);
+ int r = open_pool_ctx(dpp, pool, io_ctx, false);
if (r < 0)
return r;
return 0;
}
-int RGWRados::pool_iterate_begin(const rgw_pool& pool, const string& cursor, RGWPoolIterCtx& ctx)
+int RGWRados::pool_iterate_begin(const DoutPrefixProvider *dpp, const rgw_pool& pool, const string& cursor, RGWPoolIterCtx& ctx)
{
librados::IoCtx& io_ctx = ctx.io_ctx;
librados::NObjectIterator& iter = ctx.iter;
- int r = open_pool_ctx(pool, io_ctx, false);
+ int r = open_pool_ctx(dpp, pool, io_ctx, false);
if (r < 0)
return r;
librados::ObjectCursor oc;
if (!oc.from_str(cursor)) {
- ldout(cct, 10) << "failed to parse cursor: " << cursor << dendl;
+ ldpp_dout(dpp, 10) << "failed to parse cursor: " << cursor << dendl;
return -EINVAL;
}
return 0;
} catch (const std::system_error& e) {
r = -e.code().value();
- ldout(cct, 10) << "nobjects_begin threw " << e.what()
+ ldpp_dout(dpp, 10) << "nobjects_begin threw " << e.what()
<< ", returning " << r << dendl;
return r;
} catch (const std::exception& e) {
- ldout(cct, 10) << "nobjects_begin threw " << e.what()
+ ldpp_dout(dpp, 10) << "nobjects_begin threw " << e.what()
<< ", returning -5" << dendl;
return -EIO;
}
}
}
-int RGWRados::list_raw_objects_init(const rgw_pool& pool, const string& marker, RGWListRawObjsCtx *ctx)
+int RGWRados::list_raw_objects_init(const DoutPrefixProvider *dpp, const rgw_pool& pool, const string& marker, RGWListRawObjsCtx *ctx)
{
if (!ctx->initialized) {
- int r = pool_iterate_begin(pool, marker, ctx->iter_ctx);
+ int r = pool_iterate_begin(dpp, pool, marker, ctx->iter_ctx);
if (r < 0) {
- ldout(cct, 10) << "failed to list objects pool_iterate_begin() returned r=" << r << dendl;
+ ldpp_dout(dpp, 10) << "failed to list objects pool_iterate_begin() returned r=" << r << dendl;
return r;
}
ctx->initialized = true;
return 0;
}
-int RGWRados::list_raw_objects_next(const string& prefix_filter, int max,
+int RGWRados::list_raw_objects_next(const DoutPrefixProvider *dpp, const string& prefix_filter, int max,
RGWListRawObjsCtx& ctx, list<string>& oids,
bool *is_truncated)
{
int r = pool_iterate(ctx.iter_ctx, max, objs, is_truncated, &filter);
if (r < 0) {
if(r != -ENOENT)
- ldout(cct, 10) << "failed to list objects pool_iterate returned r=" << r << dendl;
+ ldpp_dout(dpp, 10) << "failed to list objects pool_iterate returned r=" << r << dendl;
return r;
}
return oids.size();
}
-int RGWRados::list_raw_objects(const rgw_pool& pool, const string& prefix_filter,
+int RGWRados::list_raw_objects(const DoutPrefixProvider *dpp, const rgw_pool& pool, const string& prefix_filter,
int max, RGWListRawObjsCtx& ctx, list<string>& oids,
bool *is_truncated)
{
if (!ctx.initialized) {
- int r = list_raw_objects_init(pool, string(), &ctx);
+ int r = list_raw_objects_init(dpp, pool, string(), &ctx);
if (r < 0) {
return r;
}
}
- return list_raw_objects_next(prefix_filter, max, ctx, oids, is_truncated);
+ return list_raw_objects_next(dpp, prefix_filter, max, ctx, oids, is_truncated);
}
string RGWRados::list_raw_objs_get_cursor(RGWListRawObjsCtx& ctx)
return pool_iterate_get_cursor(ctx.iter_ctx);
}
-int RGWRados::bi_get_instance(const RGWBucketInfo& bucket_info, const rgw_obj& obj,
+int RGWRados::bi_get_instance(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj,
rgw_bucket_dir_entry *dirent)
{
rgw_cls_bi_entry bi_entry;
- int r = bi_get(bucket_info, obj, BIIndexType::Instance, &bi_entry);
+ int r = bi_get(dpp, bucket_info, obj, BIIndexType::Instance, &bi_entry);
if (r < 0 && r != -ENOENT) {
- ldout(cct, 0) << "ERROR: bi_get() returned r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: bi_get() returned r=" << r << dendl;
}
if (r < 0) {
return r;
try {
decode(*dirent, iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: failed to decode bi_entry()" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode bi_entry()" << dendl;
return -EIO;
}
return 0;
}
-int RGWRados::bi_get_olh(const RGWBucketInfo& bucket_info, const rgw_obj& obj,
+int RGWRados::bi_get_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj,
rgw_bucket_olh_entry *olh)
{
rgw_cls_bi_entry bi_entry;
- int r = bi_get(bucket_info, obj, BIIndexType::OLH, &bi_entry);
+ int r = bi_get(dpp, bucket_info, obj, BIIndexType::OLH, &bi_entry);
if (r < 0 && r != -ENOENT) {
- ldout(cct, 0) << "ERROR: bi_get() returned r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: bi_get() returned r=" << r << dendl;
}
if (r < 0) {
return r;
try {
decode(*olh, iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: failed to decode bi_entry()" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode bi_entry()" << dendl;
return -EIO;
}
return 0;
}
-int RGWRados::bi_get(const RGWBucketInfo& bucket_info, const rgw_obj& obj,
+int RGWRados::bi_get(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj,
BIIndexType index_type, rgw_cls_bi_entry *entry)
{
BucketShard bs(this);
- int ret = bs.init(bucket_info, obj);
+ int ret = bs.init(dpp, bucket_info, obj);
if (ret < 0) {
- ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl;
return ret;
}
BucketShard bs(this);
int ret = bs.init(bucket, obj, nullptr /* no RGWBucketInfo */, dpp);
if (ret < 0) {
- ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl;
return ret;
}
BucketShard bs(this);
int ret = bs.init(bucket, obj, nullptr /* no RGWBucketInfo */, dpp);
if (ret < 0) {
- ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl;
return ret;
}
BucketShard bs(this);
int ret = bs.init(bucket_info.bucket, shard_id, bucket_info.layout.current_index, nullptr /* no RGWBucketInfo */, dpp);
if (ret < 0) {
- ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl;
return ret;
}
return bi_list(bs, filter_obj, marker, max, entries, is_truncated);
}
-int RGWRados::gc_operate(string& oid, librados::ObjectWriteOperation *op)
+int RGWRados::gc_operate(const DoutPrefixProvider *dpp, string& oid, librados::ObjectWriteOperation *op)
{
- return rgw_rados_operate(gc_pool_ctx, oid, op, null_yield);
+ return rgw_rados_operate(dpp, gc_pool_ctx, oid, op, null_yield);
}
int RGWRados::gc_aio_operate(const string& oid, librados::AioCompletion *c,
return gc_pool_ctx.aio_operate(oid, c, op);
}
-int RGWRados::gc_operate(string& oid, librados::ObjectReadOperation *op, bufferlist *pbl)
+int RGWRados::gc_operate(const DoutPrefixProvider *dpp, string& oid, librados::ObjectReadOperation *op, bufferlist *pbl)
{
- return rgw_rados_operate(gc_pool_ctx, oid, op, pbl, null_yield);
+ return rgw_rados_operate(dpp, gc_pool_ctx, oid, op, pbl, null_yield);
}
int RGWRados::list_gc_objs(int *index, string& marker, uint32_t max, bool expired_only, std::list<cls_rgw_gc_obj_info>& result, bool *truncated, bool& processing_queue)
return obj_expirer->inspect_all_shards(dpp, utime_t(), ceph_clock_now());
}
-int RGWRados::cls_obj_prepare_op(BucketShard& bs, RGWModifyOp op, string& tag,
+int RGWRados::cls_obj_prepare_op(const DoutPrefixProvider *dpp, BucketShard& bs, RGWModifyOp op, string& tag,
rgw_obj& obj, uint16_t bilog_flags, optional_yield y, rgw_zone_set *_zones_trace)
{
rgw_zone_set zones_trace;
cls_rgw_obj_key key(obj.key.get_index_key_name(), obj.key.instance);
cls_rgw_guard_bucket_resharding(o, -ERR_BUSY_RESHARDING);
cls_rgw_bucket_prepare_op(o, op, tag, key, obj.key.get_loc(), svc.zone->get_zone().log_data, bilog_flags, zones_trace);
- return bs.bucket_obj.operate(&o, y);
+ return bs.bucket_obj.operate(dpp, &o, y);
}
int RGWRados::cls_obj_complete_op(BucketShard& bs, const rgw_obj& obj, RGWModifyOp op, string& tag,
zones_trace);
}
-int RGWRados::cls_obj_set_bucket_tag_timeout(RGWBucketInfo& bucket_info, uint64_t timeout)
+int RGWRados::cls_obj_set_bucket_tag_timeout(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, uint64_t timeout)
{
RGWSI_RADOS::Pool index_pool;
map<int, string> bucket_objs;
- int r = svc.bi_rados->open_bucket_index(bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr);
+ int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr);
if (r < 0)
return r;
// value - list result for the corresponding oid (shard), it is filled by
// the AIO callback
map<int, string> shard_oids;
- int r = svc.bi_rados->open_bucket_index(bucket_info, shard_id,
+ int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, shard_id,
&index_pool, &shard_oids,
nullptr);
if (r < 0) {
RGWSI_RADOS::Pool index_pool;
map<int, string> oids;
- int r = svc.bi_rados->open_bucket_index(bucket_info, shard_id, &index_pool, &oids, nullptr);
+ int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &oids, nullptr);
if (r < 0) {
return r;
}
rgw_obj_key obj_key;
bool parsed = rgw_obj_key::parse_raw_oid(key, &obj_key);
if (!parsed) {
- ldout(cct, 0) <<
+ ldpp_dout(dpp, 0) <<
"ERROR: RGWRados::cls_bucket_list_unordered received an invalid "
"start marker: '" << start_after << "'" << dendl;
return -EINVAL;
cls_rgw_bucket_list_op(op, marker, prefix, empty_delimiter,
num_entries,
list_versions, &result);
- r = rgw_rados_operate(ioctx, oid, &op, nullptr, null_yield);
+ r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, null_yield);
if (r < 0) {
return r;
}
} // RGWRados::cls_bucket_list_unordered
-int RGWRados::cls_obj_usage_log_add(const string& oid,
+int RGWRados::cls_obj_usage_log_add(const DoutPrefixProvider *dpp, const string& oid,
rgw_usage_log_info& info)
{
rgw_raw_obj obj(svc.zone->get_zone_params().usage_log_pool, oid);
rgw_rados_ref ref;
- int r = get_raw_obj_ref(obj, &ref);
+ int r = get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
return r;
}
ObjectWriteOperation op;
cls_rgw_usage_log_add(op, info);
- r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
+ r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
return r;
}
-int RGWRados::cls_obj_usage_log_read(const string& oid, const string& user, const string& bucket,
+int RGWRados::cls_obj_usage_log_read(const DoutPrefixProvider *dpp, const string& oid, const string& user, const string& bucket,
uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
string& read_iter, map<rgw_user_bucket, rgw_usage_log_entry>& usage,
bool *is_truncated)
rgw_raw_obj obj(svc.zone->get_zone_params().usage_log_pool, oid);
rgw_rados_ref ref;
- int r = get_raw_obj_ref(obj, &ref);
+ int r = get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
return r;
}
return r;
}
-static int cls_rgw_usage_log_trim_repeat(rgw_rados_ref ref, const string& user, const string& bucket, uint64_t start_epoch, uint64_t end_epoch)
+static int cls_rgw_usage_log_trim_repeat(const DoutPrefixProvider *dpp, rgw_rados_ref ref, const string& user, const string& bucket, uint64_t start_epoch, uint64_t end_epoch)
{
bool done = false;
do {
librados::ObjectWriteOperation op;
cls_rgw_usage_log_trim(op, user, bucket, start_epoch, end_epoch);
- int r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
+ int r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
if (r == -ENODATA)
done = true;
else if (r < 0)
return 0;
}
-int RGWRados::cls_obj_usage_log_trim(const string& oid, const string& user, const string& bucket,
+int RGWRados::cls_obj_usage_log_trim(const DoutPrefixProvider *dpp, const string& oid, const string& user, const string& bucket,
uint64_t start_epoch, uint64_t end_epoch)
{
rgw_raw_obj obj(svc.zone->get_zone_params().usage_log_pool, oid);
rgw_rados_ref ref;
- int r = get_raw_obj_ref(obj, &ref);
+ int r = get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
return r;
}
- r = cls_rgw_usage_log_trim_repeat(ref, user, bucket, start_epoch, end_epoch);
+ r = cls_rgw_usage_log_trim_repeat(dpp, ref, user, bucket, start_epoch, end_epoch);
return r;
}
-int RGWRados::cls_obj_usage_log_clear(string& oid)
+int RGWRados::cls_obj_usage_log_clear(const DoutPrefixProvider *dpp, string& oid)
{
rgw_raw_obj obj(svc.zone->get_zone_params().usage_log_pool, oid);
rgw_rados_ref ref;
- int r = get_raw_obj_ref(obj, &ref);
+ int r = get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
return r;
}
librados::ObjectWriteOperation op;
cls_rgw_usage_log_clear(op);
- r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
+ r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
return r;
}
-int RGWRados::remove_objs_from_index(RGWBucketInfo& bucket_info, list<rgw_obj_index_key>& oid_list)
+int RGWRados::remove_objs_from_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, list<rgw_obj_index_key>& oid_list)
{
RGWSI_RADOS::Pool index_pool;
string dir_oid;
uint8_t suggest_flag = (svc.zone->get_zone().log_data ? CEPH_RGW_DIR_SUGGEST_LOG_OP : 0);
- int r = svc.bi_rados->open_bucket_index(bucket_info, &index_pool, &dir_oid);
+ int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, &index_pool, &dir_oid);
if (r < 0)
return r;
for (auto iter = oid_list.begin(); iter != oid_list.end(); ++iter) {
rgw_bucket_dir_entry entry;
entry.key = *iter;
- dout(2) << "RGWRados::remove_objs_from_index bucket=" << bucket_info.bucket << " obj=" << entry.key.name << ":" << entry.key.instance << dendl;
+ ldpp_dout(dpp, 2) << "RGWRados::remove_objs_from_index bucket=" << bucket_info.bucket << " obj=" << entry.key.name << ":" << entry.key.instance << dendl;
entry.ver.epoch = (uint64_t)-1; // ULLONG_MAX, needed to that objclass doesn't skip out request
updates.append(CEPH_RGW_REMOVE | suggest_flag);
encode(entry, updates);
if (iter != astate->attrset.end()) {
r = decode_policy(iter->second, &owner);
if (r < 0) {
- dout(0) << "WARNING: could not decode policy for object: " << obj << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: could not decode policy for object: " << obj << dendl;
}
}
if (astate->manifest) {
RGWObjManifest::obj_iterator miter;
RGWObjManifest& manifest = *astate->manifest;
- for (miter = manifest.obj_begin(); miter != manifest.obj_end(); ++miter) {
+ for (miter = manifest.obj_begin(dpp); miter != manifest.obj_end(dpp); ++miter) {
const rgw_raw_obj& raw_loc = miter.get_location().get_raw_obj(store);
rgw_obj loc;
RGWSI_Tier_RADOS::raw_obj_to_obj(manifest.get_obj().bucket, raw_loc, &loc);
if (loc.key.ns == RGW_OBJ_NS_MULTIPART) {
- dout(10) << "check_disk_state(): removing manifest part from index: " << loc << dendl;
+ ldpp_dout(dpp, 10) << "check_disk_state(): removing manifest part from index: " << loc << dendl;
r = delete_obj_index(loc, astate->mtime, dpp);
if (r < 0) {
- dout(0) << "WARNING: delete_obj_index() returned r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: delete_obj_index() returned r=" << r << dendl;
}
}
}
return 0;
}
-int RGWRados::cls_bucket_head(const RGWBucketInfo& bucket_info, int shard_id, vector<rgw_bucket_dir_header>& headers, map<int, string> *bucket_instance_ids)
+int RGWRados::cls_bucket_head(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id, vector<rgw_bucket_dir_header>& headers, map<int, string> *bucket_instance_ids)
{
RGWSI_RADOS::Pool index_pool;
map<int, string> oids;
map<int, struct rgw_cls_list_ret> list_results;
- int r = svc.bi_rados->open_bucket_index(bucket_info, shard_id, &index_pool, &oids, bucket_instance_ids);
+ int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &oids, bucket_instance_ids);
if (r < 0) {
- ldout(cct, 20) << "cls_bucket_head: open_bucket_index() returned "
+ ldpp_dout(dpp, 20) << "cls_bucket_head: open_bucket_index() returned "
<< r << dendl;
return r;
}
r = CLSRGWIssueGetDirHeader(index_pool.ioctx(), oids, list_results, cct->_conf->rgw_bucket_index_max_aio)();
if (r < 0) {
- ldout(cct, 20) << "cls_bucket_head: CLSRGWIssueGetDirHeader() returned "
+ ldpp_dout(dpp, 20) << "cls_bucket_head: CLSRGWIssueGetDirHeader() returned "
<< r << dendl;
return r;
}
return 0;
}
-int RGWRados::cls_bucket_head_async(const RGWBucketInfo& bucket_info, int shard_id, RGWGetDirHeader_CB *ctx, int *num_aio)
+int RGWRados::cls_bucket_head_async(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id, RGWGetDirHeader_CB *ctx, int *num_aio)
{
RGWSI_RADOS::Pool index_pool;
map<int, string> bucket_objs;
- int r = svc.bi_rados->open_bucket_index(bucket_info, shard_id, &index_pool, &bucket_objs, nullptr);
+ int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &bucket_objs, nullptr);
if (r < 0)
return r;
"; new num shards " << final_num_shards << " (suggested " <<
suggested_num_shards << ")" << dendl;
- return add_bucket_to_reshard(bucket_info, final_num_shards, dpp);
+ return add_bucket_to_reshard(dpp, bucket_info, final_num_shards);
}
-int RGWRados::add_bucket_to_reshard(const RGWBucketInfo& bucket_info, uint32_t new_num_shards, const DoutPrefixProvider *dpp)
+int RGWRados::add_bucket_to_reshard(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, uint32_t new_num_shards)
{
RGWReshard reshard(this->store, dpp);
entry.old_num_shards = num_source_shards;
entry.new_num_shards = new_num_shards;
- return reshard.add(entry);
+ return reshard.add(dpp, entry);
}
int RGWRados::check_quota(const rgw_user& bucket_owner, rgw_bucket& bucket,
return &rados;
}
-int RGWRados::delete_raw_obj_aio(const rgw_raw_obj& obj, list<librados::AioCompletion *>& handles)
+int RGWRados::delete_raw_obj_aio(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, list<librados::AioCompletion *>& handles)
{
rgw_rados_ref ref;
- int ret = get_raw_obj_ref(obj, &ref);
+ int ret = get_raw_obj_ref(dpp, obj, &ref);
if (ret < 0) {
- lderr(cct) << "ERROR: failed to get obj ref with ret=" << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to get obj ref with ret=" << ret << dendl;
return ret;
}
AioCompletion *c = librados::Rados::aio_create_completion(nullptr, nullptr);
ret = ref.pool.ioctx().aio_operate(ref.obj.oid, c, &op);
if (ret < 0) {
- lderr(cct) << "ERROR: AioOperate failed with ret=" << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: AioOperate failed with ret=" << ret << dendl;
c->release();
return ret;
}
optional_yield y)
{
rgw_rados_ref ref;
- int ret = get_obj_head_ref(bucket_info, obj, &ref);
+ int ret = get_obj_head_ref(dpp, bucket_info, obj, &ref);
if (ret < 0) {
- lderr(cct) << "ERROR: failed to get obj ref with ret=" << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to get obj ref with ret=" << ret << dendl;
return ret;
}
ret = index_op.prepare(dpp, CLS_RGW_OP_DEL, &astate->write_tag, y);
if (ret < 0) {
- lderr(cct) << "ERROR: failed to prepare index op with ret=" << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to prepare index op with ret=" << ret << dendl;
return ret;
}
}
AioCompletion *c = librados::Rados::aio_create_completion(nullptr, nullptr);
ret = ref.pool.ioctx().aio_operate(ref.obj.oid, c, &op);
if (ret < 0) {
- lderr(cct) << "ERROR: AioOperate failed with ret=" << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: AioOperate failed with ret=" << ret << dendl;
c->release();
return ret;
}
if (keep_index_consistent) {
ret = delete_obj_index(obj, astate->mtime, dpp);
if (ret < 0) {
- lderr(cct) << "ERROR: failed to delete obj index with ret=" << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to delete obj index with ret=" << ret << dendl;
return ret;
}
}
}
}
-int rgw_policy_from_attrset(CephContext *cct, map<string, bufferlist>& attrset, RGWAccessControlPolicy *policy);
+int rgw_policy_from_attrset(const DoutPrefixProvider *dpp, CephContext *cct, map<string, bufferlist>& attrset, RGWAccessControlPolicy *policy);
struct RGWOLHInfo {
rgw_obj target;
friend class rgw::sal::RadosStore;
/** Open the pool used as root for this gateway */
- int open_root_pool_ctx();
- int open_gc_pool_ctx();
- int open_lc_pool_ctx();
- int open_objexp_pool_ctx();
- int open_reshard_pool_ctx();
- int open_notif_pool_ctx();
-
- int open_pool_ctx(const rgw_pool& pool, librados::IoCtx& io_ctx,
+ int open_root_pool_ctx(const DoutPrefixProvider *dpp);
+ int open_gc_pool_ctx(const DoutPrefixProvider *dpp);
+ int open_lc_pool_ctx(const DoutPrefixProvider *dpp);
+ int open_objexp_pool_ctx(const DoutPrefixProvider *dpp);
+ int open_reshard_pool_ctx(const DoutPrefixProvider *dpp);
+ int open_notif_pool_ctx(const DoutPrefixProvider *dpp);
+
+ int open_pool_ctx(const DoutPrefixProvider *dpp, const rgw_pool& pool, librados::IoCtx& io_ctx,
bool mostly_omap);
std::atomic<int64_t> max_req_id = { 0 };
// This field represents the number of bucket index object shards
uint32_t bucket_index_max_shards;
- int get_obj_head_ref(const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_rados_ref *ref);
- int get_system_obj_ref(const rgw_raw_obj& obj, rgw_rados_ref *ref);
+ int get_obj_head_ref(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_rados_ref *ref);
+ int get_system_obj_ref(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, rgw_rados_ref *ref);
uint64_t max_bucket_id;
int get_olh_target_state(const DoutPrefixProvider *dpp, RGWObjectCtx& rctx, const RGWBucketInfo& bucket_info, const rgw_obj& obj,
bool follow_olh, optional_yield y, bool assume_noent = false);
int append_atomic_test(const DoutPrefixProvider *dpp, RGWObjectCtx *rctx, const RGWBucketInfo& bucket_info, const rgw_obj& obj,
librados::ObjectOperation& op, RGWObjState **state, optional_yield y);
- int append_atomic_test(const RGWObjState* astate, librados::ObjectOperation& op);
+ int append_atomic_test(const DoutPrefixProvider *dpp, const RGWObjState* astate, librados::ObjectOperation& op);
int update_placement_map();
int store_bucket_info(RGWBucketInfo& info, map<string, bufferlist> *pattrs, RGWObjVersionTracker *objv_tracker, bool exclusive);
bool use_cache{false};
- int get_obj_head_ioctx(const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::IoCtx *ioctx);
+ int get_obj_head_ioctx(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::IoCtx *ioctx);
public:
RGWRados(): timer(NULL),
gc(NULL), lc(NULL), obj_expirer(NULL), use_gc_thread(false), use_lc_thread(false), quota_threads(false),
return sync_tracer;
}
- int get_required_alignment(const rgw_pool& pool, uint64_t *alignment);
+ int get_required_alignment(const DoutPrefixProvider *dpp, const rgw_pool& pool, uint64_t *alignment);
void get_max_aligned_size(uint64_t size, uint64_t alignment, uint64_t *max_size);
int get_max_chunk_size(const rgw_pool& pool, uint64_t *max_chunk_size, const DoutPrefixProvider *dpp, uint64_t *palignment = nullptr);
int get_max_chunk_size(const rgw_placement_rule& placement_rule, const rgw_obj& obj, uint64_t *max_chunk_size, const DoutPrefixProvider *dpp, uint64_t *palignment = nullptr);
}
- int get_raw_obj_ref(const rgw_raw_obj& obj, rgw_rados_ref *ref);
+ int get_raw_obj_ref(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, rgw_rados_ref *ref);
- int list_raw_objects_init(const rgw_pool& pool, const string& marker, RGWListRawObjsCtx *ctx);
- int list_raw_objects_next(const string& prefix_filter, int max,
+ int list_raw_objects_init(const DoutPrefixProvider *dpp, const rgw_pool& pool, const string& marker, RGWListRawObjsCtx *ctx);
+ int list_raw_objects_next(const DoutPrefixProvider *dpp, const string& prefix_filter, int max,
RGWListRawObjsCtx& ctx, list<string>& oids,
bool *is_truncated);
- int list_raw_objects(const rgw_pool& pool, const string& prefix_filter, int max,
+ int list_raw_objects(const DoutPrefixProvider *dpp, const rgw_pool& pool, const string& prefix_filter, int max,
RGWListRawObjsCtx& ctx, list<string>& oids,
bool *is_truncated);
string list_raw_objs_get_cursor(RGWListRawObjsCtx& ctx);
int update_service_map(std::map<std::string, std::string>&& status);
/// list logs
- int log_list_init(const string& prefix, RGWAccessHandle *handle);
+ int log_list_init(const DoutPrefixProvider *dpp, const string& prefix, RGWAccessHandle *handle);
int log_list_next(RGWAccessHandle handle, string *name);
/// remove log
- int log_remove(const string& name);
+ int log_remove(const DoutPrefixProvider *dpp, const string& name);
/// show log
- int log_show_init(const string& name, RGWAccessHandle *handle);
+ int log_show_init(const DoutPrefixProvider *dpp, const string& name, RGWAccessHandle *handle);
int log_show_next(RGWAccessHandle handle, rgw_log_entry *entry);
// log bandwidth info
- int log_usage(map<rgw_user_bucket, RGWUsageBatch>& usage_info);
- int read_usage(const rgw_user& user, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch,
+ int log_usage(const DoutPrefixProvider *dpp, map<rgw_user_bucket, RGWUsageBatch>& usage_info);
+ int read_usage(const DoutPrefixProvider *dpp, const rgw_user& user, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch,
uint32_t max_entries, bool *is_truncated, RGWUsageIter& read_iter, map<rgw_user_bucket,
rgw_usage_log_entry>& usage);
- int trim_usage(const rgw_user& user, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch);
- int clear_usage();
+ int trim_usage(const DoutPrefixProvider *dpp, const rgw_user& user, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch);
+ int clear_usage(const DoutPrefixProvider *dpp);
- int create_pool(const rgw_pool& pool);
+ int create_pool(const DoutPrefixProvider *dpp, const rgw_pool& pool);
void create_bucket_id(string *bucket_id);
explicit BucketShard(RGWRados *_store) : store(_store), shard_id(-1) {}
int init(const rgw_bucket& _bucket, const rgw_obj& obj, RGWBucketInfo* out, const DoutPrefixProvider *dpp);
int init(const rgw_bucket& _bucket, int sid, const rgw::bucket_index_layout_generation& idx_layout, RGWBucketInfo* out, const DoutPrefixProvider *dpp);
- int init(const RGWBucketInfo& bucket_info, const rgw_obj& obj);
- int init(const RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout, int sid);
+ int init(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj);
+ int init(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout, int sid);
};
class Object {
int prepare_atomic_modification(const DoutPrefixProvider *dpp, librados::ObjectWriteOperation& op, bool reset_obj, const string *ptag,
const char *ifmatch, const char *ifnomatch, bool removal_op, bool modify_tail, optional_yield y);
- int complete_atomic_modification();
+ int complete_atomic_modification(const DoutPrefixProvider *dpp);
public:
Object(RGWRados *_store, const RGWBucketInfo& _bucket_info, RGWObjectCtx& _ctx, const rgw_obj& _obj) : store(_store), bucket_info(_bucket_info),
explicit Stat(RGWRados::Object *_source) : source(_source) {}
- int stat_async();
+ int stat_async(const DoutPrefixProvider *dpp);
int wait();
int stat();
private:
int rewrite_obj(RGWBucketInfo& dest_bucket_info, rgw::sal::Object* obj, const DoutPrefixProvider *dpp, optional_yield y);
- int stat_remote_obj(RGWObjectCtx& obj_ctx,
+ int stat_remote_obj(const DoutPrefixProvider *dpp,
+ RGWObjectCtx& obj_ctx,
const rgw_user& user_id,
req_info *info,
const rgw_zone_id& source_zone,
const ceph::real_time& expiration_time = ceph::real_time(),
rgw_zone_set *zones_trace = nullptr);
- int delete_raw_obj(const rgw_raw_obj& obj);
+ int delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj);
/** Remove an object from the bucket index */
int delete_obj_index(const rgw_obj& obj, ceph::real_time mtime, const DoutPrefixProvider *dpp);
return get_obj_state(dpp, rctx, bucket_info, obj, state, true, y);
}
- using iterate_obj_cb = int (*)(const rgw_raw_obj&, off_t, off_t,
+ using iterate_obj_cb = int (*)(const DoutPrefixProvider*, const rgw_raw_obj&, off_t, off_t,
off_t, bool, RGWObjState*, void*);
int iterate_obj(const DoutPrefixProvider *dpp, RGWObjectCtx& ctx, const RGWBucketInfo& bucket_info,
uint64_t max_chunk_size, iterate_obj_cb cb, void *arg,
optional_yield y);
- int get_obj_iterate_cb(const rgw_raw_obj& read_obj, off_t obj_ofs,
+ int get_obj_iterate_cb(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& read_obj, off_t obj_ofs,
off_t read_ofs, off_t len, bool is_head_obj,
RGWObjState *astate, void *arg);
* a simple object read without keeping state
*/
- int raw_obj_stat(rgw_raw_obj& obj, uint64_t *psize, ceph::real_time *pmtime, uint64_t *epoch,
+ int raw_obj_stat(const DoutPrefixProvider *dpp,
+ rgw_raw_obj& obj, uint64_t *psize, ceph::real_time *pmtime, uint64_t *epoch,
map<string, bufferlist> *attrs, bufferlist *first_chunk,
RGWObjVersionTracker *objv_tracker, optional_yield y);
- int obj_operate(const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::ObjectWriteOperation *op);
- int obj_operate(const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::ObjectReadOperation *op);
+ int obj_operate(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::ObjectWriteOperation *op);
+ int obj_operate(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::ObjectReadOperation *op);
int guard_reshard(const DoutPrefixProvider *dpp,
BucketShard *bs,
optional_yield y,
const DoutPrefixProvider *dpp);
- void bucket_index_guard_olh_op(RGWObjState& olh_state, librados::ObjectOperation& op);
- int olh_init_modification(const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, string *op_tag);
- int olh_init_modification_impl(const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, string *op_tag);
+ void bucket_index_guard_olh_op(const DoutPrefixProvider *dpp, RGWObjState& olh_state, librados::ObjectOperation& op);
+ int olh_init_modification(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, string *op_tag);
+ int olh_init_modification_impl(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, string *op_tag);
int bucket_index_link_olh(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info, RGWObjState& olh_state,
const rgw_obj& obj_instance, bool delete_marker,
int set_olh(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, const RGWBucketInfo& bucket_info, const rgw_obj& target_obj, bool delete_marker, rgw_bucket_dir_entry_meta *meta,
uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time,
optional_yield y, rgw_zone_set *zones_trace = nullptr, bool log_data_change = false);
- int repair_olh(RGWObjState* state, const RGWBucketInfo& bucket_info,
+ int repair_olh(const DoutPrefixProvider *dpp, RGWObjState* state, const RGWBucketInfo& bucket_info,
const rgw_obj& obj);
int unlink_obj_instance(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWBucketInfo& bucket_info, const rgw_obj& target_obj,
uint64_t olh_epoch, optional_yield y, rgw_zone_set *zones_trace = nullptr);
void check_pending_olh_entries(map<string, bufferlist>& pending_entries, map<string, bufferlist> *rm_pending_entries);
- int remove_olh_pending_entries(const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, map<string, bufferlist>& pending_attrs);
+ int remove_olh_pending_entries(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, map<string, bufferlist>& pending_attrs);
int follow_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjectCtx& ctx, RGWObjState *state, const rgw_obj& olh_obj, rgw_obj *target);
- int get_olh(const RGWBucketInfo& bucket_info, const rgw_obj& obj, RGWOLHInfo *olh);
+ int get_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, RGWOLHInfo *olh);
void gen_rand_obj_instance_name(rgw_obj_key *target_key);
void gen_rand_obj_instance_name(rgw_obj *target);
int update_containers_stats(map<string, RGWBucketEnt>& m, const DoutPrefixProvider *dpp);
- int append_async(rgw_raw_obj& obj, size_t size, bufferlist& bl);
+ int append_async(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, size_t size, bufferlist& bl);
public:
void set_atomic(void *ctx, rgw_obj& obj) {
rctx->set_prefetch_data(obj);
}
int decode_policy(bufferlist& bl, ACLOwner *owner);
- int get_bucket_stats(RGWBucketInfo& bucket_info, int shard_id, string *bucket_ver, string *master_ver,
+ int get_bucket_stats(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, int shard_id, string *bucket_ver, string *master_ver,
map<RGWObjCategory, RGWStorageStats>& stats, string *max_marker, bool* syncstopped = NULL);
- int get_bucket_stats_async(RGWBucketInfo& bucket_info, int shard_id, RGWGetBucketStats_CB *cb);
+ int get_bucket_stats_async(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, int shard_id, RGWGetBucketStats_CB *cb);
int put_bucket_instance_info(RGWBucketInfo& info, bool exclusive, ceph::real_time mtime, map<string, bufferlist> *pattrs, const DoutPrefixProvider *dpp);
/* xxx dang obj_ctx -> svc */
map<string, bufferlist> *pattrs, bool create_entry_point,
const DoutPrefixProvider *dpp);
- int cls_obj_prepare_op(BucketShard& bs, RGWModifyOp op, string& tag, rgw_obj& obj, uint16_t bilog_flags, optional_yield y, rgw_zone_set *zones_trace = nullptr);
+ int cls_obj_prepare_op(const DoutPrefixProvider *dpp, BucketShard& bs, RGWModifyOp op, string& tag, rgw_obj& obj, uint16_t bilog_flags, optional_yield y, rgw_zone_set *zones_trace = nullptr);
int cls_obj_complete_op(BucketShard& bs, const rgw_obj& obj, RGWModifyOp op, string& tag, int64_t pool, uint64_t epoch,
rgw_bucket_dir_entry& ent, RGWObjCategory category, list<rgw_obj_index_key> *remove_objs, uint16_t bilog_flags, rgw_zone_set *zones_trace = nullptr);
int cls_obj_complete_add(BucketShard& bs, const rgw_obj& obj, string& tag, int64_t pool, uint64_t epoch, rgw_bucket_dir_entry& ent,
int cls_obj_complete_del(BucketShard& bs, string& tag, int64_t pool, uint64_t epoch, rgw_obj& obj,
ceph::real_time& removed_mtime, list<rgw_obj_index_key> *remove_objs, uint16_t bilog_flags, rgw_zone_set *zones_trace = nullptr);
int cls_obj_complete_cancel(BucketShard& bs, string& tag, rgw_obj& obj, uint16_t bilog_flags, rgw_zone_set *zones_trace = nullptr);
- int cls_obj_set_bucket_tag_timeout(RGWBucketInfo& bucket_info, uint64_t timeout);
+ int cls_obj_set_bucket_tag_timeout(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, uint64_t timeout);
using ent_map_t =
boost::container::flat_map<std::string, rgw_bucket_dir_entry>;
rgw_obj_index_key *last_entry,
optional_yield y,
check_filter_t = nullptr);
- int cls_bucket_head(const RGWBucketInfo& bucket_info, int shard_id, vector<rgw_bucket_dir_header>& headers, map<int, string> *bucket_instance_ids = NULL);
- int cls_bucket_head_async(const RGWBucketInfo& bucket_info, int shard_id, RGWGetDirHeader_CB *ctx, int *num_aio);
+ int cls_bucket_head(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id, vector<rgw_bucket_dir_header>& headers, map<int, string> *bucket_instance_ids = NULL);
+ int cls_bucket_head_async(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id, RGWGetDirHeader_CB *ctx, int *num_aio);
- int bi_get_instance(const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_bucket_dir_entry *dirent);
- int bi_get_olh(const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_bucket_olh_entry *olh);
- int bi_get(const RGWBucketInfo& bucket_info, const rgw_obj& obj, BIIndexType index_type, rgw_cls_bi_entry *entry);
+ int bi_get_instance(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_bucket_dir_entry *dirent);
+ int bi_get_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_bucket_olh_entry *olh);
+ int bi_get(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, BIIndexType index_type, rgw_cls_bi_entry *entry);
void bi_put(librados::ObjectWriteOperation& op, BucketShard& bs, rgw_cls_bi_entry& entry);
int bi_put(BucketShard& bs, rgw_cls_bi_entry& entry);
int bi_put(const DoutPrefixProvider *dpp, rgw_bucket& bucket, rgw_obj& obj, rgw_cls_bi_entry& entry);
list<rgw_cls_bi_entry> *entries, bool *is_truncated);
int bi_remove(BucketShard& bs);
- int cls_obj_usage_log_add(const string& oid, rgw_usage_log_info& info);
- int cls_obj_usage_log_read(const string& oid, const string& user, const string& bucket, uint64_t start_epoch,
+ int cls_obj_usage_log_add(const DoutPrefixProvider *dpp, const string& oid, rgw_usage_log_info& info);
+ int cls_obj_usage_log_read(const DoutPrefixProvider *dpp, const string& oid, const string& user, const string& bucket, uint64_t start_epoch,
uint64_t end_epoch, uint32_t max_entries, string& read_iter, map<rgw_user_bucket,
rgw_usage_log_entry>& usage, bool *is_truncated);
- int cls_obj_usage_log_trim(const string& oid, const string& user, const string& bucket, uint64_t start_epoch,
+ int cls_obj_usage_log_trim(const DoutPrefixProvider *dpp, const string& oid, const string& user, const string& bucket, uint64_t start_epoch,
uint64_t end_epoch);
- int cls_obj_usage_log_clear(string& oid);
+ int cls_obj_usage_log_clear(const DoutPrefixProvider *dpp, string& oid);
int get_target_shard_id(const rgw::bucket_index_normal_layout& layout, const string& obj_key, int *shard_id);
int lock_exclusive(const rgw_pool& pool, const string& oid, ceph::timespan& duration, rgw_zone_id& zone_id, string& owner_id);
int unlock(const rgw_pool& pool, const string& oid, rgw_zone_id& zone_id, string& owner_id);
- void update_gc_chain(rgw_obj& head_obj, RGWObjManifest& manifest, cls_rgw_obj_chain *chain);
+ void update_gc_chain(const DoutPrefixProvider *dpp, rgw_obj& head_obj, RGWObjManifest& manifest, cls_rgw_obj_chain *chain);
int send_chain_to_gc(cls_rgw_obj_chain& chain, const string& tag);
- void delete_objs_inline(cls_rgw_obj_chain& chain, const string& tag);
- int gc_operate(string& oid, librados::ObjectWriteOperation *op);
+ void delete_objs_inline(const DoutPrefixProvider *dpp, cls_rgw_obj_chain& chain, const string& tag);
+ int gc_operate(const DoutPrefixProvider *dpp, string& oid, librados::ObjectWriteOperation *op);
int gc_aio_operate(const std::string& oid, librados::AioCompletion *c,
librados::ObjectWriteOperation *op);
- int gc_operate(string& oid, librados::ObjectReadOperation *op, bufferlist *pbl);
+ int gc_operate(const DoutPrefixProvider *dpp, string& oid, librados::ObjectReadOperation *op, bufferlist *pbl);
int list_gc_objs(int *index, string& marker, uint32_t max, bool expired_only, std::list<cls_rgw_gc_obj_info>& result, bool *truncated, bool& processing_queue);
int process_gc(bool expired_only);
int list_lc_progress(string& marker, uint32_t max_entries,
vector<rgw::sal::Lifecycle::LCEntry>& progress_map, int& index);
- int bucket_check_index(RGWBucketInfo& bucket_info,
+ int bucket_check_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info,
map<RGWObjCategory, RGWStorageStats> *existing_stats,
map<RGWObjCategory, RGWStorageStats> *calculated_stats);
- int bucket_rebuild_index(RGWBucketInfo& bucket_info);
- int bucket_set_reshard(const RGWBucketInfo& bucket_info, const cls_rgw_bucket_instance_entry& entry);
- int remove_objs_from_index(RGWBucketInfo& bucket_info, list<rgw_obj_index_key>& oid_list);
- int move_rados_obj(librados::IoCtx& src_ioctx,
+ int bucket_rebuild_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info);
+ int bucket_set_reshard(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const cls_rgw_bucket_instance_entry& entry);
+ int remove_objs_from_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, list<rgw_obj_index_key>& oid_list);
+ int move_rados_obj(const DoutPrefixProvider *dpp,
+ librados::IoCtx& src_ioctx,
const string& src_oid, const string& src_locator,
librados::IoCtx& dst_ioctx,
const string& dst_oid, const string& dst_locator);
- int fix_head_obj_locator(const RGWBucketInfo& bucket_info, bool copy_obj, bool remove_bad, rgw_obj_key& key);
+ int fix_head_obj_locator(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, bool copy_obj, bool remove_bad, rgw_obj_key& key);
int fix_tail_obj_locator(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, rgw_obj_key& key, bool fix, bool *need_fix, optional_yield y);
int check_quota(const rgw_user& bucket_owner, rgw_bucket& bucket,
int check_bucket_shards(const RGWBucketInfo& bucket_info, const rgw_bucket& bucket,
uint64_t num_objs, const DoutPrefixProvider *dpp);
- int add_bucket_to_reshard(const RGWBucketInfo& bucket_info, uint32_t new_num_shards, const DoutPrefixProvider *dpp);
+ int add_bucket_to_reshard(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, uint32_t new_num_shards);
uint64_t instance_id();
librados::Rados* get_rados_handle();
- int delete_raw_obj_aio(const rgw_raw_obj& obj, list<librados::AioCompletion *>& handles);
+ int delete_raw_obj_aio(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, list<librados::AioCompletion *>& handles);
int delete_obj_aio(const DoutPrefixProvider *dpp, const rgw_obj& obj, RGWBucketInfo& info, RGWObjState *astate,
list<librados::AioCompletion *>& handles, bool keep_index_consistent,
optional_yield y);
* ctx: context object to use for the iteration
* Returns: 0 on success, -ERR# otherwise.
*/
- int pool_iterate_begin(const rgw_pool& pool, RGWPoolIterCtx& ctx);
+ int pool_iterate_begin(const DoutPrefixProvider *dpp, const rgw_pool& pool, RGWPoolIterCtx& ctx);
/**
* Init pool iteration
* ctx: context object to use for the iteration
* Returns: 0 on success, -ERR# otherwise.
*/
- int pool_iterate_begin(const rgw_pool& pool, const string& cursor, RGWPoolIterCtx& ctx);
+ int pool_iterate_begin(const DoutPrefixProvider *dpp, const rgw_pool& pool, const string& cursor, RGWPoolIterCtx& ctx);
/**
* Get pool iteration position
void RGWRealmReloader::reload()
{
CephContext *const cct = store->ctx();
- ldout(cct, 1) << "Pausing frontends for realm update..." << dendl;
+ const DoutPrefix dp(cct, dout_subsys, "rgw realm reloader: ");
+ ldpp_dout(&dp, 1) << "Pausing frontends for realm update..." << dendl;
frontends->pause();
- ldout(cct, 1) << "Frontends paused" << dendl;
+ ldpp_dout(&dp, 1) << "Frontends paused" << dendl;
// TODO: make RGWRados responsible for rgw_log_usage lifetime
rgw_log_usage_finalize();
StoreManager::close_storage(store);
store = nullptr;
- ldout(cct, 1) << "Store closed" << dendl;
+ ldpp_dout(&dp, 1) << "Store closed" << dendl;
{
// allow a new notify to reschedule us. it's important that we do this
// before we start loading the new realm, or we could miss some updates
reload_scheduled = nullptr;
}
- const DoutPrefix dp(cct, dout_subsys, "rgw realm reloader: ");
while (!store) {
// recreate and initialize a new store
cct->_conf.get_val<bool>("rgw_dynamic_resharding"),
cct->_conf->rgw_cache_enabled);
- ldout(cct, 1) << "Creating new store" << dendl;
+ ldpp_dout(&dp, 1) << "Creating new store" << dendl;
rgw::sal::Store* store_cleanup = nullptr;
{
// sleep until we get another notification, and retry until we get
// a working configuration
if (store == nullptr) {
- lderr(cct) << "Failed to reinitialize RGWRados after a realm "
+ ldpp_dout(&dp, -1) << "Failed to reinitialize RGWRados after a realm "
"configuration update. Waiting for a new update." << dendl;
// sleep until another event is scheduled
}
if (store_cleanup) {
- ldout(cct, 4) << "Got another notification, restarting RGWRados "
+ ldpp_dout(&dp, 4) << "Got another notification, restarting RGWRados "
"initialization." << dendl;
StoreManager::close_storage(store_cleanup);
int r = store->register_to_service_map("rgw", service_map_meta);
if (r < 0) {
- lderr(cct) << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl;
+ ldpp_dout(&dp, -1) << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl;
/* ignore error */
}
- ldout(cct, 1) << "Finishing initialization of new store" << dendl;
+ ldpp_dout(&dp, 1) << "Finishing initialization of new store" << dendl;
// finish initializing the new store
- ldout(cct, 1) << " - REST subsystem init" << dendl;
+ ldpp_dout(&dp, 1) << " - REST subsystem init" << dendl;
rgw_rest_init(cct, store->get_zone()->get_zonegroup());
- ldout(cct, 1) << " - usage subsystem init" << dendl;
+ ldpp_dout(&dp, 1) << " - usage subsystem init" << dendl;
rgw_log_usage_init(cct, store);
- ldout(cct, 1) << "Resuming frontends with new realm configuration." << dendl;
+ ldpp_dout(&dp, 1) << "Resuming frontends with new realm configuration." << dendl;
frontends->resume(store);
}
#define dout_prefix (*_dout << "rgw realm watcher: ")
-RGWRealmWatcher::RGWRealmWatcher(CephContext* cct, const RGWRealm& realm)
+RGWRealmWatcher::RGWRealmWatcher(const DoutPrefixProvider *dpp, CephContext* cct, const RGWRealm& realm)
: cct(cct)
{
// no default realm, nothing to watch
if (realm.get_id().empty()) {
- ldout(cct, 4) << "No realm, disabling dynamic reconfiguration." << dendl;
+ ldpp_dout(dpp, 4) << "No realm, disabling dynamic reconfiguration." << dendl;
return;
}
// establish the watch on RGWRealm
- int r = watch_start(realm);
+ int r = watch_start(dpp, realm);
if (r < 0) {
- lderr(cct) << "Failed to establish a watch on RGWRealm, "
+ ldpp_dout(dpp, -1) << "Failed to establish a watch on RGWRealm, "
"disabling dynamic reconfiguration." << dendl;
return;
}
watch_restart();
}
-int RGWRealmWatcher::watch_start(const RGWRealm& realm)
+int RGWRealmWatcher::watch_start(const DoutPrefixProvider *dpp, const RGWRealm& realm)
{
// initialize a Rados client
int r = rados.init_with_context(cct);
if (r < 0) {
- lderr(cct) << "Rados client initialization failed with "
+ ldpp_dout(dpp, -1) << "Rados client initialization failed with "
<< cpp_strerror(-r) << dendl;
return r;
}
r = rados.connect();
if (r < 0) {
- lderr(cct) << "Rados client connection failed with "
+ ldpp_dout(dpp, -1) << "Rados client connection failed with "
<< cpp_strerror(-r) << dendl;
return r;
}
// open an IoCtx for the realm's pool
rgw_pool pool(realm.get_pool(cct));
- r = rgw_init_ioctx(&rados, pool, pool_ctx);
+ r = rgw_init_ioctx(dpp, &rados, pool, pool_ctx);
if (r < 0) {
- lderr(cct) << "Failed to open pool " << pool
+ ldpp_dout(dpp, -1) << "Failed to open pool " << pool
<< " with " << cpp_strerror(-r) << dendl;
rados.shutdown();
return r;
auto oid = realm.get_control_oid();
r = pool_ctx.watch2(oid, &watch_handle, this);
if (r < 0) {
- lderr(cct) << "Failed to watch " << oid
+ ldpp_dout(dpp, -1) << "Failed to watch " << oid
<< " with " << cpp_strerror(-r) << dendl;
pool_ctx.close();
rados.shutdown();
return r;
}
- ldout(cct, 10) << "Watching " << oid << dendl;
+ ldpp_dout(dpp, 10) << "Watching " << oid << dendl;
std::swap(watch_oid, oid);
return 0;
}
bufferlist::const_iterator& p) = 0;
};
- RGWRealmWatcher(CephContext* cct, const RGWRealm& realm);
+ RGWRealmWatcher(const DoutPrefixProvider *dpp, CephContext* cct, const RGWRealm& realm);
~RGWRealmWatcher() override;
/// register a watcher for the given notification type
uint64_t watch_handle = 0;
std::string watch_oid;
- int watch_start(const RGWRealm& realm);
+ int watch_start(const DoutPrefixProvider *dpp, const RGWRealm& realm);
int watch_restart();
void watch_stop();
outer_reshard_lock(_outer_reshard_lock)
{ }
-int RGWBucketReshard::set_resharding_status(rgw::sal::RadosStore* store,
+int RGWBucketReshard::set_resharding_status(const DoutPrefixProvider *dpp,
+ rgw::sal::RadosStore* store,
const RGWBucketInfo& bucket_info,
const string& new_instance_id,
int32_t num_shards,
cls_rgw_reshard_status status)
{
if (new_instance_id.empty()) {
- ldout(store->ctx(), 0) << __func__ << " missing new bucket instance id" << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " missing new bucket instance id" << dendl;
return -EINVAL;
}
cls_rgw_bucket_instance_entry instance_entry;
instance_entry.set_status(new_instance_id, num_shards, status);
- int ret = store->getRados()->bucket_set_reshard(bucket_info, instance_entry);
+ int ret = store->getRados()->bucket_set_reshard(dpp, bucket_info, instance_entry);
if (ret < 0) {
- ldout(store->ctx(), 0) << "RGWReshard::" << __func__ << " ERROR: error setting bucket resharding flag on bucket index: "
+ ldpp_dout(dpp, 0) << "RGWReshard::" << __func__ << " ERROR: error setting bucket resharding flag on bucket index: "
<< cpp_strerror(-ret) << dendl;
return ret;
}
}
// reshard lock assumes lock is held
-int RGWBucketReshard::clear_resharding(rgw::sal::RadosStore* store,
+int RGWBucketReshard::clear_resharding(const DoutPrefixProvider *dpp,
+ rgw::sal::RadosStore* store,
const RGWBucketInfo& bucket_info)
{
- int ret = clear_index_shard_reshard_status(store, bucket_info);
+ int ret = clear_index_shard_reshard_status(dpp, store, bucket_info);
if (ret < 0) {
- ldout(store->ctx(), 0) << "RGWBucketReshard::" << __func__ <<
+ ldpp_dout(dpp, 0) << "RGWBucketReshard::" << __func__ <<
" ERROR: error clearing reshard status from index shard " <<
cpp_strerror(-ret) << dendl;
return ret;
}
cls_rgw_bucket_instance_entry instance_entry;
- ret = store->getRados()->bucket_set_reshard(bucket_info, instance_entry);
+ ret = store->getRados()->bucket_set_reshard(dpp, bucket_info, instance_entry);
if (ret < 0) {
- ldout(store->ctx(), 0) << "RGWReshard::" << __func__ <<
+ ldpp_dout(dpp, 0) << "RGWReshard::" << __func__ <<
" ERROR: error setting bucket resharding flag on bucket index: " <<
cpp_strerror(-ret) << dendl;
return ret;
return 0;
}
-int RGWBucketReshard::clear_index_shard_reshard_status(rgw::sal::RadosStore* store,
+int RGWBucketReshard::clear_index_shard_reshard_status(const DoutPrefixProvider *dpp,
+ rgw::sal::RadosStore* store,
const RGWBucketInfo& bucket_info)
{
uint32_t num_shards = bucket_info.layout.current_index.layout.normal.num_shards;
if (num_shards < std::numeric_limits<uint32_t>::max()) {
- int ret = set_resharding_status(store, bucket_info,
+ int ret = set_resharding_status(dpp, store, bucket_info,
bucket_info.bucket.bucket_id,
(num_shards < 1 ? 1 : num_shards),
cls_rgw_reshard_status::NOT_RESHARDING);
if (ret < 0) {
- ldout(store->ctx(), 0) << "RGWBucketReshard::" << __func__ <<
+ ldpp_dout(dpp, 0) << "RGWBucketReshard::" << __func__ <<
" ERROR: error clearing reshard status from index shard " <<
cpp_strerror(-ret) << dendl;
return ret;
new_bucket_info.new_bucket_instance_id.clear();
new_bucket_info.reshard_status = cls_rgw_reshard_status::NOT_RESHARDING;
- int ret = store->svc()->bi->init_index(new_bucket_info);
+ int ret = store->svc()->bi->init_index(dpp, new_bucket_info);
if (ret < 0) {
cerr << "ERROR: failed to init new bucket indexes: " << cpp_strerror(-ret) << std::endl;
return ret;
bucket_info, bucket_attrs, new_bucket_info, dpp);
}
-int RGWBucketReshard::cancel()
+int RGWBucketReshard::cancel(const DoutPrefixProvider *dpp)
{
int ret = reshard_lock.lock();
if (ret < 0) {
return ret;
}
- ret = clear_resharding();
+ ret = clear_resharding(dpp);
reshard_lock.unlock();
return ret;
if (in_progress) {
// resharding must not have ended correctly, clean up
int ret =
- RGWBucketReshard::clear_index_shard_reshard_status(store, bucket_info);
+ RGWBucketReshard::clear_index_shard_reshard_status(dpp, store, bucket_info);
if (ret < 0) {
- lderr(store->ctx()) << "Error: " << __func__ <<
+ ldpp_dout(dpp, -1) << "Error: " << __func__ <<
" clear_index_shard_status returned " << ret << dendl;
}
bucket_info.new_bucket_instance_id.clear();
}
int ret = store->getRados()->get_target_shard_id(new_bucket_info.layout.current_index.layout.normal, obj.get_hash_object(), &target_shard_id);
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: get_target_shard_id() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: get_target_shard_id() returned ret=" << ret << dendl;
return ret;
}
}
ret = reshard_lock.renew(now);
if (ret < 0) {
- lderr(store->ctx()) << "Error renewing bucket lock: " << ret << dendl;
+ ldpp_dout(dpp, -1) << "Error renewing bucket lock: " << ret << dendl;
return ret;
}
}
ret = target_shards_mgr.finish();
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: failed to reshard" << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to reshard" << dendl;
return -EIO;
}
// NB: some error clean-up is done by ~BucketInfoReshardUpdate
} // RGWBucketReshard::do_reshard
-int RGWBucketReshard::get_status(list<cls_rgw_bucket_instance_entry> *status)
+int RGWBucketReshard::get_status(const DoutPrefixProvider *dpp, list<cls_rgw_bucket_instance_entry> *status)
{
- return store->svc()->bi_rados->get_reshard_status(bucket_info, status);
+ return store->svc()->bi_rados->get_reshard_status(dpp, bucket_info, status);
}
}
if (reshard_log) {
- ret = reshard_log->update(bucket_info, new_bucket_info);
+ ret = reshard_log->update(dpp, bucket_info, new_bucket_info);
if (ret < 0) {
goto error_out;
}
// set resharding status of current bucket_info & shards with
// information about planned resharding
- ret = set_resharding_status(new_bucket_info.bucket.bucket_id,
+ ret = set_resharding_status(dpp, new_bucket_info.bucket.bucket_id,
num_shards, cls_rgw_reshard_status::IN_PROGRESS);
if (ret < 0) {
goto error_out;
// best effort and don't report out an error; the lock isn't needed
// at this point since all we're doing is using a best effort to remove old
// shard objects
- ret = store->svc()->bi->clean_index(bucket_info);
+ ret = store->svc()->bi->clean_index(dpp, bucket_info);
if (ret < 0) {
- lderr(store->ctx()) << "Error: " << __func__ <<
+ ldpp_dout(dpp, -1) << "Error: " << __func__ <<
" failed to clean up old shards; " <<
"RGWRados::clean_bucket_index returned " << ret << dendl;
}
// since the real problem is the issue that led to this error code
// path, we won't touch ret and instead use another variable to
// temporarily hold error codes
- int ret2 = store->svc()->bi->clean_index(new_bucket_info);
+ int ret2 = store->svc()->bi->clean_index(dpp, new_bucket_info);
if (ret2 < 0) {
- lderr(store->ctx()) << "Error: " << __func__ <<
+ ldpp_dout(dpp, -1) << "Error: " << __func__ <<
" failed to clean up shards from failed incomplete resharding; " <<
"RGWRados::clean_bucket_index returned " << ret2 << dendl;
}
get_logshard_oid(int(sid), oid);
}
// Patch hunk: RGWReshard::add() now takes a DoutPrefixProvider; ldout/lderr
// on the store's CephContext are replaced by ldpp_dout(dpp, ...), and dpp is
// also threaded into rgw_rados_operate().
// Visible behavior: when resharding is disabled this is a silent no-op that
// returns 0 (not an error); otherwise the entry is appended to the reshard
// log object via a cls write op, with failures logged at level -1 and the
// rados error code returned.
// NOTE(review): logshard_oid is declared/computed in lines elided from this
// hunk -- presumably derived from 'entry' via get_logshard_oid(); confirm in
// the full source.
-int RGWReshard::add(cls_rgw_reshard_entry& entry)
+int RGWReshard::add(const DoutPrefixProvider *dpp, cls_rgw_reshard_entry& entry)
{
if (!store->svc()->zone->can_reshard()) {
- ldout(store->ctx(), 20) << __func__ << " Resharding is disabled" << dendl;
+ ldpp_dout(dpp, 20) << __func__ << " Resharding is disabled" << dendl;
return 0;
}
librados::ObjectWriteOperation op;
cls_rgw_reshard_add(op, entry);
- int ret = rgw_rados_operate(store->getRados()->reshard_pool_ctx, logshard_oid, &op, null_yield);
+ int ret = rgw_rados_operate(dpp, store->getRados()->reshard_pool_ctx, logshard_oid, &op, null_yield);
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: failed to add entry to reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant << " bucket=" << entry.bucket_name << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to add entry to reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant << " bucket=" << entry.bucket_name << dendl;
return ret;
}
return 0;
}
// Patch hunk: update() gains a DoutPrefixProvider and passes it down to
// add().  It rebuilds a reshard-queue entry for the bucket, records the new
// bucket instance id ("<name>:<bucket_id>"), and re-queues it.
// Visible behavior: an add() failure is only logged (level 0) -- the
// function still returns 0, i.e. the update is best-effort.
// NOTE(review): 'ret' is declared in lines elided from this hunk (the entry
// appears to be looked up there first); confirm in the full source.
-int RGWReshard::update(const RGWBucketInfo& bucket_info, const RGWBucketInfo& new_bucket_info)
+int RGWReshard::update(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const RGWBucketInfo& new_bucket_info)
{
cls_rgw_reshard_entry entry;
entry.bucket_name = bucket_info.bucket.name;
entry.new_instance_id = new_bucket_info.bucket.name + ":" + new_bucket_info.bucket.bucket_id;
- ret = add(entry);
+ ret = add(dpp, entry);
if (ret < 0) {
- ldout(store->ctx(), 0) << __func__ << ":Error in updating entry bucket " << entry.bucket_name << ": " <<
+ ldpp_dout(dpp, 0) << __func__ << ":Error in updating entry bucket " << entry.bucket_name << ": " <<
cpp_strerror(-ret) << dendl;
}
return 0;
}
-int RGWReshard::remove(cls_rgw_reshard_entry& entry)
+int RGWReshard::remove(const DoutPrefixProvider *dpp, cls_rgw_reshard_entry& entry)
{
string logshard_oid;
librados::ObjectWriteOperation op;
cls_rgw_reshard_remove(op, entry);
- int ret = rgw_rados_operate(store->getRados()->reshard_pool_ctx, logshard_oid, &op, null_yield);
+ int ret = rgw_rados_operate(dpp, store->getRados()->reshard_pool_ctx, logshard_oid, &op, null_yield);
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: failed to remove entry from reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant << " bucket=" << entry.bucket_name << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to remove entry from reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant << " bucket=" << entry.bucket_name << dendl;
return ret;
}
": removing reshard queue entry for a resharded or non-existent bucket" <<
entry.bucket_name << dendl;
- ret = remove(entry);
+ ret = remove(dpp, entry);
if (ret < 0) {
ldpp_dout(dpp, 0) << __func__ <<
": Error removing non-existent bucket " <<
" removing reshard queue entry for bucket " << entry.bucket_name <<
dendl;
- ret = remove(entry);
+ ret = remove(dpp, entry);
if (ret < 0) {
ldpp_dout(dpp, 0) << __func__ << ": Error removing bucket " <<
entry.bucket_name << " from resharding queue: " <<
bool verbose = false, ostream *out = nullptr,
Formatter *formatter = nullptr,
RGWReshard *reshard_log = nullptr);
- int get_status(std::list<cls_rgw_bucket_instance_entry> *status);
- int cancel();
- static int clear_resharding(rgw::sal::RadosStore* store,
+ int get_status(const DoutPrefixProvider *dpp, std::list<cls_rgw_bucket_instance_entry> *status);
+ int cancel(const DoutPrefixProvider *dpp);
+ static int clear_resharding(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store,
const RGWBucketInfo& bucket_info);
- int clear_resharding() {
- return clear_resharding(store, bucket_info);
+ int clear_resharding(const DoutPrefixProvider *dpp) {
+ return clear_resharding(dpp, store, bucket_info);
}
- static int clear_index_shard_reshard_status(rgw::sal::RadosStore* store,
+ static int clear_index_shard_reshard_status(const DoutPrefixProvider *dpp,
+ rgw::sal::RadosStore* store,
const RGWBucketInfo& bucket_info);
- int clear_index_shard_reshard_status() {
- return clear_index_shard_reshard_status(store, bucket_info);
+ int clear_index_shard_reshard_status(const DoutPrefixProvider *dpp) {
+ return clear_index_shard_reshard_status(dpp, store, bucket_info);
}
- static int set_resharding_status(rgw::sal::RadosStore* store,
+ static int set_resharding_status(const DoutPrefixProvider *dpp,
+ rgw::sal::RadosStore* store,
const RGWBucketInfo& bucket_info,
const string& new_instance_id,
int32_t num_shards,
cls_rgw_reshard_status status);
- int set_resharding_status(const string& new_instance_id,
+ int set_resharding_status(const DoutPrefixProvider *dpp, const string& new_instance_id,
int32_t num_shards,
cls_rgw_reshard_status status) {
- return set_resharding_status(store, bucket_info,
+ return set_resharding_status(dpp, store, bucket_info,
new_instance_id, num_shards, status);
}
public:
RGWReshard(rgw::sal::RadosStore* _store, bool _verbose = false, ostream *_out = nullptr, Formatter *_formatter = nullptr);
- int add(cls_rgw_reshard_entry& entry);
- int update(const RGWBucketInfo& bucket_info, const RGWBucketInfo& new_bucket_info);
+ int add(const DoutPrefixProvider *dpp, cls_rgw_reshard_entry& entry);
+ int update(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const RGWBucketInfo& new_bucket_info);
int get(cls_rgw_reshard_entry& entry);
- int remove(cls_rgw_reshard_entry& entry);
+ int remove(const DoutPrefixProvider *dpp, cls_rgw_reshard_entry& entry);
int list(int logshard_num, string& marker, uint32_t max, std::list<cls_rgw_reshard_entry>& entries, bool *is_truncated);
int clear_bucket_resharding(const string& bucket_instance_oid, cls_rgw_reshard_entry& entry);
}
} catch (const std::exception& e) {
- lderr(s->cct) << "Error reading IAM User Policy: " << e.what() << dendl;
+ ldpp_dout(op, -1) << "Error reading IAM User Policy: " << e.what() << dendl;
}
}
rgw_build_iam_environment(store, s);
op_state.set_new_bucket_name(new_bucket_name);
bufferlist data;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
op_state.set_bucket_name(bucket);
bufferlist data;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
string canonical_header;
- if (!rgw_create_s3_canonical_header(info, NULL, canonical_header, false)) {
+ if (!rgw_create_s3_canonical_header(dpp, info, NULL, canonical_header, false)) {
ldpp_dout(dpp, 0) << "failed to create canonical s3 header" << dendl;
return -EINVAL;
}
}
}
-int RGWRESTSimpleRequest::forward_request(RGWAccessKey& key, req_info& info, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y)
+int RGWRESTSimpleRequest::forward_request(const DoutPrefixProvider *dpp, RGWAccessKey& key, req_info& info, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y)
{
string date_str;
new_env.set("HTTP_X_AMZ_CONTENT_SHA256", maybe_payload_hash);
}
- int ret = sign_request(this, key, region, service, new_env, new_info, nullptr);
+ int ret = sign_request(dpp, key, region, service, new_env, new_info, nullptr);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: failed to sign request" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to sign request" << dendl;
return ret;
}
}
}
-int RGWRESTGenerateHTTPHeaders::set_obj_attrs(map<string, bufferlist>& rgw_attrs)
+int RGWRESTGenerateHTTPHeaders::set_obj_attrs(const DoutPrefixProvider *dpp, map<string, bufferlist>& rgw_attrs)
{
map<string, string> new_attrs;
}
RGWAccessControlPolicy policy;
- int ret = rgw_policy_from_attrset(cct, rgw_attrs, &policy);
+ int ret = rgw_policy_from_attrset(dpp, cct, rgw_attrs, &policy);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: couldn't get policy ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: couldn't get policy ret=" << ret << dendl;
return ret;
}
add_grants_headers(grants_by_type, *new_env, new_info->x_meta_map);
}
-int RGWRESTGenerateHTTPHeaders::sign(RGWAccessKey& key, const bufferlist *opt_content)
+int RGWRESTGenerateHTTPHeaders::sign(const DoutPrefixProvider *dpp, RGWAccessKey& key, const bufferlist *opt_content)
{
- int ret = sign_request(this, key, region, service, *new_env, *new_info, opt_content);
+ int ret = sign_request(dpp, key, region, service, *new_env, *new_info, opt_content);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: failed to sign request" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to sign request" << dendl;
return ret;
}
url = headers_gen.get_url();
}
// Patch hunk: this send_ready() overload (rgw attrs variant) gains a
// DoutPrefixProvider and forwards it both to the header generator and to the
// base send_ready(dpp, key) overload.  No other behavior change is visible.
-void RGWRESTStreamS3PutObj::send_ready(RGWAccessKey& key, map<string, bufferlist>& rgw_attrs)
+void RGWRESTStreamS3PutObj::send_ready(const DoutPrefixProvider *dpp, RGWAccessKey& key, map<string, bufferlist>& rgw_attrs)
{
- headers_gen.set_obj_attrs(rgw_attrs);
+ headers_gen.set_obj_attrs(dpp, rgw_attrs);
- send_ready(key);
+ send_ready(dpp, key);
}
// Patch hunk: this send_ready() overload (raw HTTP attrs + ACL policy
// variant) gains a DoutPrefixProvider, used only when chaining to the base
// send_ready(dpp, key).  Header/policy setup on headers_gen is unchanged.
-void RGWRESTStreamS3PutObj::send_ready(RGWAccessKey& key, const map<string, string>& http_attrs,
+void RGWRESTStreamS3PutObj::send_ready(const DoutPrefixProvider *dpp, RGWAccessKey& key, const map<string, string>& http_attrs,
RGWAccessControlPolicy& policy)
{
headers_gen.set_http_attrs(http_attrs);
headers_gen.set_policy(policy);
- send_ready(key);
+ send_ready(dpp, key);
}
-void RGWRESTStreamS3PutObj::send_ready(RGWAccessKey& key)
+void RGWRESTStreamS3PutObj::send_ready(const DoutPrefixProvider *dpp, RGWAccessKey& key)
{
- headers_gen.sign(key, nullptr);
+ headers_gen.sign(dpp, key, nullptr);
for (const auto& kv: new_env.get_map()) {
headers.emplace_back(kv);
out_cb = new RGWRESTStreamOutCB(this);
}
// Patch hunk: put_obj_init() gains a DoutPrefixProvider and forwards it to
// send_ready().  Convenience wrapper: initializes the request for 'obj' and
// then prepares headers/signing from the given attrs.
-void RGWRESTStreamS3PutObj::put_obj_init(RGWAccessKey& key, rgw::sal::Object* obj, map<string, bufferlist>& attrs)
+void RGWRESTStreamS3PutObj::put_obj_init(const DoutPrefixProvider *dpp, RGWAccessKey& key, rgw::sal::Object* obj, map<string, bufferlist>& attrs)
{
send_init(obj);
- send_ready(key, attrs);
+ send_ready(dpp, key, attrs);
}
void set_str_from_headers(map<string, string>& out_headers, const string& header_name, string& str)
*resource = urlsafe_bucket + "/" + urlsafe_object;
}
// Patch hunk: the rgw_obj-based send_request() overload gains a
// DoutPrefixProvider.  It converts the object to a resource path and chains
// to the string-resource send_request() overload, now passing dpp through.
-int RGWRESTStreamRWRequest::send_request(RGWAccessKey& key, map<string, string>& extra_headers, const rgw_obj& obj, RGWHTTPManager *mgr)
+int RGWRESTStreamRWRequest::send_request(const DoutPrefixProvider *dpp, RGWAccessKey& key, map<string, string>& extra_headers, const rgw_obj& obj, RGWHTTPManager *mgr)
{
string resource;
send_prepare_convert(obj, &resource);
- return send_request(&key, extra_headers, resource, mgr);
+ return send_request(dpp, &key, extra_headers, resource, mgr);
}
// Patch hunk: the rgw_obj-based send_prepare() overload gains a
// DoutPrefixProvider.  Converts the object to a resource path, then chains
// to do_send_prepare() with dpp threaded through.
-int RGWRESTStreamRWRequest::send_prepare(RGWAccessKey& key, map<string, string>& extra_headers, const rgw_obj& obj)
+int RGWRESTStreamRWRequest::send_prepare(const DoutPrefixProvider *dpp, RGWAccessKey& key, map<string, string>& extra_headers, const rgw_obj& obj)
{
string resource;
send_prepare_convert(obj, &resource);
- return do_send_prepare(&key, extra_headers, resource);
+ return do_send_prepare(dpp, &key, extra_headers, resource);
}
// Patch hunk: the string-resource send_prepare() overload gains a
// DoutPrefixProvider.  URL-encodes the resource (slashes left intact so the
// path structure survives) and chains to do_send_prepare() with dpp.
-int RGWRESTStreamRWRequest::send_prepare(RGWAccessKey *key, map<string, string>& extra_headers, const string& resource,
+int RGWRESTStreamRWRequest::send_prepare(const DoutPrefixProvider *dpp, RGWAccessKey *key, map<string, string>& extra_headers, const string& resource,
bufferlist *send_data)
{
string new_resource;
//do not encode slash
url_encode(resource, new_resource, false);
- return do_send_prepare(key, extra_headers, new_resource, send_data);
+ return do_send_prepare(dpp, key, extra_headers, new_resource, send_data);
}
-int RGWRESTStreamRWRequest::do_send_prepare(RGWAccessKey *key, map<string, string>& extra_headers, const string& resource,
+int RGWRESTStreamRWRequest::do_send_prepare(const DoutPrefixProvider *dpp, RGWAccessKey *key, map<string, string>& extra_headers, const string& resource,
bufferlist *send_data)
{
string new_url = url;
return 0;
}
-int RGWRESTStreamRWRequest::send_request(RGWAccessKey *key, map<string, string>& extra_headers, const string& resource,
+int RGWRESTStreamRWRequest::send_request(const DoutPrefixProvider *dpp, RGWAccessKey *key, map<string, string>& extra_headers, const string& resource,
RGWHTTPManager *mgr, bufferlist *send_data)
{
- int ret = send_prepare(key, extra_headers, resource, send_data);
+ int ret = send_prepare(dpp, key, extra_headers, resource, send_data);
if (ret < 0) {
return ret;
}
int RGWRESTStreamRWRequest::send(RGWHTTPManager *mgr)
{
if (!headers_gen) {
- ldout(cct, 0) << "ERROR: " << __func__ << "(): send_prepare() was not called: likey a bug!" << dendl;
+ ldpp_dout(this, 0) << "ERROR: " << __func__ << "(): send_prepare() was not called: likey a bug!" << dendl;
return -EINVAL;
}
}
if (sign_key) {
- int r = headers_gen->sign(*sign_key, outblp);
+ int r = headers_gen->sign(this, *sign_key, outblp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to sign request" << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to sign request" << dendl;
return r;
}
}
param_vec_t *_headers, param_vec_t *_params,
std::optional<std::string> _api_name) : RGWHTTPSimpleRequest(_cct, _method, _url, _headers, _params), api_name(_api_name) {}
- int forward_request(RGWAccessKey& key, req_info& info, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y);
+ int forward_request(const DoutPrefixProvider *dpp, RGWAccessKey& key, req_info& info, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y);
};
class RGWWriteDrainCB {
const string& resource, const param_vec_t& params,
std::optional<string> api_name);
void set_extra_headers(const map<string, string>& extra_headers);
- int set_obj_attrs(map<string, bufferlist>& rgw_attrs);
+ int set_obj_attrs(const DoutPrefixProvider *dpp, map<string, bufferlist>& rgw_attrs);
void set_http_attrs(const map<string, string>& http_attrs);
void set_policy(RGWAccessControlPolicy& policy);
- int sign(RGWAccessKey& key, const bufferlist *opt_content);
+ int sign(const DoutPrefixProvider *dpp, RGWAccessKey& key, const bufferlist *opt_content);
const string& get_url() { return url; }
};
}
virtual ~RGWRESTStreamRWRequest() override {}
- int send_prepare(RGWAccessKey *key, map<string, string>& extra_headers, const string& resource, bufferlist *send_data = nullptr /* optional input data */);
- int send_prepare(RGWAccessKey& key, map<string, string>& extra_headers, const rgw_obj& obj);
+ int send_prepare(const DoutPrefixProvider *dpp, RGWAccessKey *key, map<string, string>& extra_headers, const string& resource, bufferlist *send_data = nullptr /* optional input data */);
+ int send_prepare(const DoutPrefixProvider *dpp, RGWAccessKey& key, map<string, string>& extra_headers, const rgw_obj& obj);
int send(RGWHTTPManager *mgr);
- int send_request(RGWAccessKey& key, map<string, string>& extra_headers, const rgw_obj& obj, RGWHTTPManager *mgr);
- int send_request(RGWAccessKey *key, map<string, string>& extra_headers, const string& resource, RGWHTTPManager *mgr, bufferlist *send_data = nullptr /* optional input data */);
+ int send_request(const DoutPrefixProvider *dpp, RGWAccessKey& key, map<string, string>& extra_headers, const rgw_obj& obj, RGWHTTPManager *mgr);
+ int send_request(const DoutPrefixProvider *dpp, RGWAccessKey *key, map<string, string>& extra_headers, const string& resource, RGWHTTPManager *mgr, bufferlist *send_data = nullptr /* optional input data */);
void add_params(param_vec_t *params);
private:
- int do_send_prepare(RGWAccessKey *key, map<string, string>& extra_headers, const string& resource, bufferlist *send_data = nullptr /* optional input data */);
+ int do_send_prepare(const DoutPrefixProvider *dpp, RGWAccessKey *key, map<string, string>& extra_headers, const string& resource, bufferlist *send_data = nullptr /* optional input data */);
};
class RGWRESTStreamReadRequest : public RGWRESTStreamRWRequest {
~RGWRESTStreamS3PutObj() override;
void send_init(rgw::sal::Object* obj);
- void send_ready(RGWAccessKey& key, map<string, bufferlist>& rgw_attrs);
- void send_ready(RGWAccessKey& key, const map<string, string>& http_attrs,
+ void send_ready(const DoutPrefixProvider *dpp, RGWAccessKey& key, map<string, bufferlist>& rgw_attrs);
+ void send_ready(const DoutPrefixProvider *dpp, RGWAccessKey& key, const map<string, string>& http_attrs,
RGWAccessControlPolicy& policy);
- void send_ready(RGWAccessKey& key);
+ void send_ready(const DoutPrefixProvider *dpp, RGWAccessKey& key);
- void put_obj_init(RGWAccessKey& key, rgw::sal::Object* obj, map<string, bufferlist>& attrs);
+ void put_obj_init(const DoutPrefixProvider *dpp, RGWAccessKey& key, rgw::sal::Object* obj, map<string, bufferlist>& attrs);
RGWGetDataCB *get_out_cb() { return out_cb; }
};
#define dout_subsys ceph_subsys_rgw
// Patch hunk: the op itself is a DoutPrefixProvider, so 'this' is passed to
// zonegroup_map.read() and the bare dout(5) becomes ldpp_dout(this, 5).
// Reads the zonegroup map via the store's sysobj service; failure is logged
// at level 5 and left in op_ret for the caller/response path.
void RGWOp_ZoneGroupMap_Get::execute(optional_yield y) {
- op_ret = zonegroup_map.read(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, y);
+ op_ret = zonegroup_map.read(this, g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, y);
if (op_ret < 0) {
- dout(5) << "failed to read zone_group map" << dendl;
+ ldpp_dout(this, 5) << "failed to read zone_group map" << dendl;
}
}
populate_zonegroup(params, zonegroup);
}
-int RGWRESTConn::forward(const rgw_user& uid, req_info& info, obj_version *objv, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y)
+int RGWRESTConn::forward(const DoutPrefixProvider *dpp, const rgw_user& uid, req_info& info, obj_version *objv, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y)
{
string url;
int ret = get_url(url);
params.push_back(param_pair_t(RGW_SYS_PARAM_PREFIX "ver", buf));
}
RGWRESTSimpleRequest req(cct, info.method, url, NULL, ¶ms, api_name);
- return req.forward_request(key, info, max_response, inbl, outbl, y);
+ return req.forward_request(dpp, key, info, max_response, inbl, outbl, y);
}
int RGWRESTConn::put_obj_send_init(rgw::sal::Object* obj, const rgw_http_param_pair *extra_params, RGWRESTStreamS3PutObj **req)
return 0;
}
-int RGWRESTConn::put_obj_async_init(const rgw_user& uid, rgw::sal::Object* obj,
+int RGWRESTConn::put_obj_async_init(const DoutPrefixProvider *dpp, const rgw_user& uid, rgw::sal::Object* obj,
map<string, bufferlist>& attrs,
RGWRESTStreamS3PutObj **req)
{
param_vec_t params;
populate_params(params, &uid, self_zone_group);
RGWRESTStreamS3PutObj *wr = new RGWRESTStreamS3PutObj(cct, "PUT", url, NULL, ¶ms, api_name, host_style);
- wr->put_obj_init(key, obj, attrs);
+ wr->put_obj_init(dpp, key, obj, attrs);
*req = wr;
return 0;
}
}
-int RGWRESTConn::get_obj(const rgw_user& uid, req_info *info /* optional */, const rgw::sal::Object* obj,
+int RGWRESTConn::get_obj(const DoutPrefixProvider *dpp, const rgw_user& uid, req_info *info /* optional */, const rgw::sal::Object* obj,
const real_time *mod_ptr, const real_time *unmod_ptr,
uint32_t mod_zone_id, uint64_t mod_pg_ver,
bool prepend_metadata, bool get_op, bool rgwx_stat,
params.sync_manifest = sync_manifest;
params.skip_decrypt = skip_decrypt;
params.cb = cb;
- return get_obj(obj, params, send, req);
+ return get_obj(dpp, obj, params, send, req);
}
-int RGWRESTConn::get_obj(const rgw::sal::Object* obj, const get_obj_params& in_params, bool send, RGWRESTStreamRWRequest **req)
+int RGWRESTConn::get_obj(const DoutPrefixProvider *dpp, const rgw::sal::Object* obj, const get_obj_params& in_params, bool send, RGWRESTStreamRWRequest **req)
{
string url;
int ret = get_url(url);
set_header(buf, extra_headers, "RANGE");
}
- int r = (*req)->send_prepare(key, extra_headers, obj->get_obj());
+ int r = (*req)->send_prepare(dpp, key, extra_headers, obj->get_obj());
if (r < 0) {
goto done_err;
}
return ret;
}
-int RGWRESTConn::get_resource(const string& resource,
+int RGWRESTConn::get_resource(const DoutPrefixProvider *dpp,
+ const string& resource,
param_vec_t *extra_params,
map<string, string> *extra_headers,
bufferlist& bl,
headers.insert(extra_headers->begin(), extra_headers->end());
}
- ret = req.send_request(&key, headers, resource, mgr, send_data);
+ ret = req.send_request(dpp, &key, headers, resource, mgr, send_data);
if (ret < 0) {
- ldout(cct, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl;
return ret;
}
req.set_params(¶ms);
}
// Patch hunk: read() gains a DoutPrefixProvider, threaded into
// req.send_request() and used for the level-5 failure log (was ldout on the
// raw cct).  Synchronous variant: sends the request and then blocks in
// complete_request(y) for the result.
-int RGWRESTReadResource::read(optional_yield y)
+int RGWRESTReadResource::read(const DoutPrefixProvider *dpp, optional_yield y)
{
- int ret = req.send_request(&conn->get_key(), headers, resource, mgr);
+ int ret = req.send_request(dpp, &conn->get_key(), headers, resource, mgr);
if (ret < 0) {
- ldout(cct, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl;
return ret;
}
return req.complete_request(y);
}
-int RGWRESTReadResource::aio_read()
+int RGWRESTReadResource::aio_read(const DoutPrefixProvider *dpp)
{
- int ret = req.send_request(&conn->get_key(), headers, resource, mgr);
+ int ret = req.send_request(dpp, &conn->get_key(), headers, resource, mgr);
if (ret < 0) {
- ldout(cct, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl;
return ret;
}
req.set_params(¶ms);
}
// Patch hunk: send() gains a DoutPrefixProvider, threaded into
// req.send_request() and used for the level-5 failure log.  Synchronous
// variant: stages 'outbl' as the request body (length + buffer), sends, and
// blocks in complete_request(y).
-int RGWRESTSendResource::send(bufferlist& outbl, optional_yield y)
+int RGWRESTSendResource::send(const DoutPrefixProvider *dpp, bufferlist& outbl, optional_yield y)
{
req.set_send_length(outbl.length());
req.set_outbl(outbl);
- int ret = req.send_request(&conn->get_key(), headers, resource, mgr);
+ int ret = req.send_request(dpp, &conn->get_key(), headers, resource, mgr);
if (ret < 0) {
- ldout(cct, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl;
return ret;
}
return req.complete_request(y);
}
-int RGWRESTSendResource::aio_send(bufferlist& outbl)
+int RGWRESTSendResource::aio_send(const DoutPrefixProvider *dpp, bufferlist& outbl)
{
req.set_send_length(outbl.length());
req.set_outbl(outbl);
- int ret = req.send_request(&conn->get_key(), headers, resource, mgr);
+ int ret = req.send_request(dpp, &conn->get_key(), headers, resource, mgr);
if (ret < 0) {
- ldout(cct, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl;
return ret;
}
virtual void populate_params(param_vec_t& params, const rgw_user *uid, const string& zonegroup);
/* sync request */
- int forward(const rgw_user& uid, req_info& info, obj_version *objv, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y);
+ int forward(const DoutPrefixProvider *dpp, const rgw_user& uid, req_info& info, obj_version *objv, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y);
/* async requests */
int put_obj_send_init(rgw::sal::Object* obj, const rgw_http_param_pair *extra_params, RGWRESTStreamS3PutObj **req);
- int put_obj_async_init(const rgw_user& uid, rgw::sal::Object* obj,
+ int put_obj_async_init(const DoutPrefixProvider *dpp, const rgw_user& uid, rgw::sal::Object* obj,
map<string, bufferlist>& attrs, RGWRESTStreamS3PutObj **req);
int complete_request(RGWRESTStreamS3PutObj *req, string& etag,
ceph::real_time *mtime, optional_yield y);
uint64_t range_end{0};
};
- int get_obj(const rgw::sal::Object* obj, const get_obj_params& params, bool send, RGWRESTStreamRWRequest **req);
+ int get_obj(const DoutPrefixProvider *dpp, const rgw::sal::Object* obj, const get_obj_params& params, bool send, RGWRESTStreamRWRequest **req);
- int get_obj(const rgw_user& uid, req_info *info /* optional */, const rgw::sal::Object* obj,
+ int get_obj(const DoutPrefixProvider *dpp, const rgw_user& uid, req_info *info /* optional */, const rgw::sal::Object* obj,
const ceph::real_time *mod_ptr, const ceph::real_time *unmod_ptr,
uint32_t mod_zone_id, uint64_t mod_pg_ver,
bool prepend_metadata, bool get_op, bool rgwx_stat, bool sync_manifest,
map<string, string> *pheaders,
optional_yield y);
- int get_resource(const string& resource,
+ int get_resource(const DoutPrefixProvider *dpp,
+ const string& resource,
param_vec_t *extra_params,
map<string, string>* extra_headers,
bufferlist& bl,
optional_yield y);
template <class T>
- int get_json_resource(const string& resource, param_vec_t *params,
+ int get_json_resource(const DoutPrefixProvider *dpp, const string& resource, param_vec_t *params,
bufferlist *in_data, optional_yield y, T& t);
template <class T>
- int get_json_resource(const string& resource, param_vec_t *params,
+ int get_json_resource(const DoutPrefixProvider *dpp, const string& resource, param_vec_t *params,
optional_yield y, T& t);
template <class T>
- int get_json_resource(const string& resource, const rgw_http_param_pair *pp,
+ int get_json_resource(const DoutPrefixProvider *dpp, const string& resource, const rgw_http_param_pair *pp,
optional_yield y, T& t);
private:
template<class T>
-int RGWRESTConn::get_json_resource(const string& resource, param_vec_t *params,
+int RGWRESTConn::get_json_resource(const DoutPrefixProvider *dpp, const string& resource, param_vec_t *params,
bufferlist *in_data, optional_yield y, T& t)
{
bufferlist bl;
- int ret = get_resource(resource, params, nullptr, bl, in_data, nullptr, y);
+ int ret = get_resource(dpp, resource, params, nullptr, bl, in_data, nullptr, y);
if (ret < 0) {
return ret;
}
}
// Patch hunk: this get_json_resource() overload (params, no input data)
// gains a DoutPrefixProvider and simply forwards to the full overload with
// in_data = nullptr.
template<class T>
-int RGWRESTConn::get_json_resource(const string& resource, param_vec_t *params,
+int RGWRESTConn::get_json_resource(const DoutPrefixProvider *dpp, const string& resource, param_vec_t *params,
optional_yield y, T& t)
{
- return get_json_resource(resource, params, nullptr, y, t);
+ return get_json_resource(dpp, resource, params, nullptr, y, t);
}
// Patch hunk: the rgw_http_param_pair-based get_json_resource() overload
// gains a DoutPrefixProvider; it materializes the pair list into a
// param_vec_t and forwards to the param_vec_t overload.
// NOTE(review): "¶ms" below looks like mojibake for "&params" (the HTML
// entity &para got substituted during extraction) -- verify encoding against
// the real source before applying.
template<class T>
-int RGWRESTConn::get_json_resource(const string& resource, const rgw_http_param_pair *pp,
+int RGWRESTConn::get_json_resource(const DoutPrefixProvider *dpp, const string& resource, const rgw_http_param_pair *pp,
optional_yield y, T& t)
{
param_vec_t params = make_param_list(pp);
- return get_json_resource(resource, ¶ms, y, t);
+ return get_json_resource(dpp, resource, ¶ms, y, t);
}
class RGWStreamIntoBufferlist : public RGWHTTPStreamRWRequest::ReceiveCB {
template <class T>
int decode_resource(T *dest);
- int read(optional_yield y);
+ int read(const DoutPrefixProvider *dpp, optional_yield y);
- int aio_read();
+ int aio_read(const DoutPrefixProvider *dpp);
string to_str() {
return req.to_str();
int wait(T *dest, optional_yield y);
template <class T>
- int fetch(T *dest, optional_yield y);
+ int fetch(const DoutPrefixProvider *dpp, T *dest, optional_yield y);
};
}
template <class T>
-int RGWRESTReadResource::fetch(T *dest, optional_yield y)
+int RGWRESTReadResource::fetch(const DoutPrefixProvider *dpp, T *dest, optional_yield y)
{
- int ret = read(y);
+ int ret = read(dpp, y);
if (ret < 0) {
return ret;
}
return req.get_io_user_info();
}
- int send(bufferlist& bl, optional_yield y);
+ int send(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y);
- int aio_send(bufferlist& bl);
+ int aio_send(const DoutPrefixProvider *dpp, bufferlist& bl);
string to_str() {
return req.to_str();
void RGWHandler_REST_IAM::rgw_iam_parse_input()
{
if (post_body.size() > 0) {
- ldout(s->cct, 10) << "Content of POST: " << post_body << dendl;
+ ldpp_dout(s, 10) << "Content of POST: " << post_body << dendl;
if (post_body.find("Action") != string::npos) {
boost::char_separator<char> sep("&");
s->dialect = "iam";
if (int ret = RGWHandler_REST_IAM::init_from_header(s, RGW_FORMAT_XML, true); ret < 0) {
- ldout(s->cct, 10) << "init_from_header returned err=" << ret << dendl;
+ ldpp_dout(s, 10) << "init_from_header returned err=" << ret << dendl;
return ret;
}
if (s->info.args.exists("start-time") ||
s->info.args.exists("end-time")) {
- dout(5) << "start-time and end-time are no longer accepted" << dendl;
+ ldpp_dout(this, 5) << "start-time and end-time are no longer accepted" << dendl;
op_ret = -EINVAL;
return;
}
shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err);
if (!err.empty()) {
- dout(5) << "Error parsing shard_id " << shard << dendl;
+ ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl;
op_ret = -EINVAL;
return;
}
if (!max_entries_str.empty()) {
max_entries = (unsigned)strict_strtol(max_entries_str.c_str(), 10, &err);
if (!err.empty()) {
- dout(5) << "Error parsing max-entries " << max_entries_str << dendl;
+ ldpp_dout(this, 5) << "Error parsing max-entries " << max_entries_str << dendl;
op_ret = -EINVAL;
return;
}
}
if (period.empty()) {
- ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
+ ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl;
period = store->get_zone()->get_current_period_id();
if (period.empty()) {
- ldout(s->cct, 5) << "Missing period id" << dendl;
+ ldpp_dout(this, 5) << "Missing period id" << dendl;
op_ret = -EINVAL;
return;
}
meta_log.init_list_entries(shard_id, {}, {}, marker, &handle);
- op_ret = meta_log.list_entries(handle, max_entries, entries,
+ op_ret = meta_log.list_entries(this, handle, max_entries, entries,
&last_marker, &truncated);
meta_log.complete_list_entries(handle);
unsigned shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err);
if (!err.empty()) {
- dout(5) << "Error parsing shard_id " << shard << dendl;
+ ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl;
op_ret = -EINVAL;
return;
}
if (period.empty()) {
- ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
+ ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl;
period = store->get_zone()->get_current_period_id();
if (period.empty()) {
- ldout(s->cct, 5) << "Missing period id" << dendl;
+ ldpp_dout(this, 5) << "Missing period id" << dendl;
op_ret = -EINVAL;
return;
}
}
RGWMetadataLog meta_log{s->cct, static_cast<rgw::sal::RadosStore*>(store)->svc()->zone, static_cast<rgw::sal::RadosStore*>(store)->svc()->cls, period};
- op_ret = meta_log.get_info(shard_id, &info);
+ op_ret = meta_log.get_info(this, shard_id, &info);
}
void RGWOp_MDLog_ShardInfo::send_response() {
if (s->info.args.exists("start-time") ||
s->info.args.exists("end-time")) {
- dout(5) << "start-time and end-time are no longer accepted" << dendl;
+ ldpp_dout(this, 5) << "start-time and end-time are no longer accepted" << dendl;
op_ret = -EINVAL;
}
if (s->info.args.exists("start-marker")) {
- dout(5) << "start-marker is no longer accepted" << dendl;
+ ldpp_dout(this, 5) << "start-marker is no longer accepted" << dendl;
op_ret = -EINVAL;
}
if (!s->info.args.exists("marker")) {
marker = s->info.args.get("end-marker");
} else {
- dout(5) << "end-marker and marker cannot both be provided" << dendl;
+ ldpp_dout(this, 5) << "end-marker and marker cannot both be provided" << dendl;
op_ret = -EINVAL;
}
}
shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err);
if (!err.empty()) {
- dout(5) << "Error parsing shard_id " << shard << dendl;
+ ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl;
op_ret = -EINVAL;
return;
}
}
if (period.empty()) {
- ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
+ ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl;
period = store->get_zone()->get_current_period_id();
if (period.empty()) {
- ldout(s->cct, 5) << "Missing period id" << dendl;
+ ldpp_dout(this, 5) << "Missing period id" << dendl;
op_ret = -EINVAL;
return;
}
}
RGWMetadataLog meta_log{s->cct, static_cast<rgw::sal::RadosStore*>(store)->svc()->zone, static_cast<rgw::sal::RadosStore*>(store)->svc()->cls, period};
- op_ret = meta_log.trim(shard_id, {}, {}, {}, marker);
+ op_ret = meta_log.trim(this, shard_id, {}, {}, {}, marker);
}
void RGWOp_MDLog_Lock::execute(optional_yield y) {
zone_id = s->info.args.get("zone-id");
if (period.empty()) {
- ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
+ ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl;
period = store->get_zone()->get_current_period_id();
}
(duration_str.empty()) ||
locker_id.empty() ||
zone_id.empty()) {
- dout(5) << "Error invalid parameter list" << dendl;
+ ldpp_dout(this, 5) << "Error invalid parameter list" << dendl;
op_ret = -EINVAL;
return;
}
string err;
shard_id = (unsigned)strict_strtol(shard_id_str.c_str(), 10, &err);
if (!err.empty()) {
- dout(5) << "Error parsing shard_id param " << shard_id_str << dendl;
+ ldpp_dout(this, 5) << "Error parsing shard_id param " << shard_id_str << dendl;
op_ret = -EINVAL;
return;
}
unsigned dur;
dur = (unsigned)strict_strtol(duration_str.c_str(), 10, &err);
if (!err.empty() || dur <= 0) {
- dout(5) << "invalid length param " << duration_str << dendl;
+ ldpp_dout(this, 5) << "invalid length param " << duration_str << dendl;
op_ret = -EINVAL;
return;
}
- op_ret = meta_log.lock_exclusive(shard_id, make_timespan(dur), zone_id,
+ op_ret = meta_log.lock_exclusive(s, shard_id, make_timespan(dur), zone_id,
locker_id);
if (op_ret == -EBUSY)
op_ret = -ERR_LOCKED;
zone_id = s->info.args.get("zone-id");
if (period.empty()) {
- ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
+ ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl;
period = store->get_zone()->get_current_period_id();
}
shard_id_str.empty() ||
locker_id.empty() ||
zone_id.empty()) {
- dout(5) << "Error invalid parameter list" << dendl;
+ ldpp_dout(this, 5) << "Error invalid parameter list" << dendl;
op_ret = -EINVAL;
return;
}
string err;
shard_id = (unsigned)strict_strtol(shard_id_str.c_str(), 10, &err);
if (!err.empty()) {
- dout(5) << "Error parsing shard_id param " << shard_id_str << dendl;
+ ldpp_dout(this, 5) << "Error parsing shard_id param " << shard_id_str << dendl;
op_ret = -EINVAL;
return;
}
RGWMetadataLog meta_log{s->cct, static_cast<rgw::sal::RadosStore*>(store)->svc()->zone, static_cast<rgw::sal::RadosStore*>(store)->svc()->cls, period};
- op_ret = meta_log.unlock(shard_id, zone_id, locker_id);
+ op_ret = meta_log.unlock(s, shard_id, zone_id, locker_id);
}
void RGWOp_MDLog_Notify::execute(optional_yield y) {
}
char* buf = data.c_str();
- ldout(s->cct, 20) << __func__ << "(): read data: " << buf << dendl;
+ ldpp_dout(this, 20) << __func__ << "(): read data: " << buf << dendl;
JSONParser p;
r = p.parse(buf, data.length());
if (r < 0) {
- ldout(s->cct, 0) << "ERROR: failed to parse JSON" << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to parse JSON" << dendl;
op_ret = r;
return;
}
try {
decode_json_obj(updated_shards, &p);
} catch (JSONDecoder::err& err) {
- ldout(s->cct, 0) << "ERROR: failed to decode JSON" << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to decode JSON" << dendl;
op_ret = -EINVAL;
return;
}
if (store->ctx()->_conf->subsys.should_gather<ceph_subsys_rgw, 20>()) {
for (set<int>::iterator iter = updated_shards.begin(); iter != updated_shards.end(); ++iter) {
- ldout(s->cct, 20) << __func__ << "(): updated shard=" << *iter << dendl;
+ ldpp_dout(this, 20) << __func__ << "(): updated shard=" << *iter << dendl;
}
}
unsigned max_entries;
if (bucket_name.empty() && bucket_instance.empty()) {
- dout(5) << "ERROR: neither bucket nor bucket instance specified" << dendl;
+ ldpp_dout(this, 5) << "ERROR: neither bucket nor bucket instance specified" << dendl;
op_ret = -EINVAL;
return;
}
}
op_ret = store->get_bucket(s, nullptr, b, &bucket, y);
if (op_ret < 0) {
- ldpp_dout(s, 5) << "could not get bucket info for bucket=" << bucket_name << dendl;
+ ldpp_dout(this, 5) << "could not get bucket info for bucket=" << bucket_name << dendl;
return;
}
send_response();
do {
list<rgw_bi_log_entry> entries;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->bilog_rados->log_list(bucket->get_info(), shard_id,
+ int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->bilog_rados->log_list(s, bucket->get_info(), shard_id,
marker, max_entries - count,
entries, &truncated);
if (ret < 0) {
- ldpp_dout(s, 5) << "ERROR: list_bi_log_entries()" << dendl;
+ ldpp_dout(this, 5) << "ERROR: list_bi_log_entries()" << dendl;
return;
}
rgw_bucket b(rgw_bucket_key(tenant_name, bucket_name));
if (bucket_name.empty() && bucket_instance.empty()) {
- ldpp_dout(s, 5) << "ERROR: neither bucket nor bucket instance specified" << dendl;
+ ldpp_dout(this, 5) << "ERROR: neither bucket nor bucket instance specified" << dendl;
op_ret = -EINVAL;
return;
}
}
op_ret = store->get_bucket(s, nullptr, b, &bucket, y);
if (op_ret < 0) {
- ldpp_dout(s, 5) << "could not get bucket info for bucket=" << bucket_name << dendl;
+ ldpp_dout(this, 5) << "could not get bucket info for bucket=" << bucket_name << dendl;
return;
}
map<RGWObjCategory, RGWStorageStats> stats;
- int ret = bucket->get_bucket_stats(shard_id, &bucket_ver, &master_ver, stats, &max_marker, &syncstopped);
+ int ret = bucket->get_bucket_stats(s, shard_id, &bucket_ver, &master_ver, stats, &max_marker, &syncstopped);
if (ret < 0 && ret != -ENOENT) {
op_ret = ret;
return;
op_ret = 0;
if ((bucket_name.empty() && bucket_instance.empty()) ||
end_marker.empty()) {
- ldpp_dout(s, 5) << "ERROR: one of bucket and bucket instance, and also end-marker is mandatory" << dendl;
+ ldpp_dout(this, 5) << "ERROR: one of bucket and bucket instance, and also end-marker is mandatory" << dendl;
op_ret = -EINVAL;
return;
}
}
op_ret = store->get_bucket(s, nullptr, b, &bucket, y);
if (op_ret < 0) {
- ldpp_dout(s, 5) << "could not get bucket info for bucket=" << bucket_name << dendl;
+ ldpp_dout(this, 5) << "could not get bucket info for bucket=" << bucket_name << dendl;
return;
}
- op_ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->bilog_rados->log_trim(bucket->get_info(), shard_id, start_marker, end_marker);
+ op_ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->bilog_rados->log_trim(s, bucket->get_info(), shard_id, start_marker, end_marker);
if (op_ret < 0) {
- ldpp_dout(s, 5) << "ERROR: trim_bi_log_entries() " << dendl;
+ ldpp_dout(this, 5) << "ERROR: trim_bi_log_entries() " << dendl;
}
return;
}
if (s->info.args.exists("start-time") ||
s->info.args.exists("end-time")) {
- dout(5) << "start-time and end-time are no longer accepted" << dendl;
+ ldpp_dout(this, 5) << "start-time and end-time are no longer accepted" << dendl;
op_ret = -EINVAL;
}
shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err);
if (!err.empty()) {
- dout(5) << "Error parsing shard_id " << shard << dendl;
+ ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl;
op_ret = -EINVAL;
return;
}
if (!max_entries_str.empty()) {
max_entries = (unsigned)strict_strtol(max_entries_str.c_str(), 10, &err);
if (!err.empty()) {
- dout(5) << "Error parsing max-entries " << max_entries_str << dendl;
+ ldpp_dout(this, 5) << "Error parsing max-entries " << max_entries_str << dendl;
op_ret = -EINVAL;
return;
}
// Note that last_marker is updated to be the marker of the last
// entry listed
- op_ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados->list_entries(shard_id,
+ op_ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados->list_entries(this, shard_id,
max_entries, entries,
marker, &last_marker,
&truncated);
unsigned shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err);
if (!err.empty()) {
- dout(5) << "Error parsing shard_id " << shard << dendl;
+ ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl;
op_ret = -EINVAL;
return;
}
- op_ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados->get_info(shard_id, &info);
+ op_ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados->get_info(this, shard_id, &info);
}
void RGWOp_DATALog_ShardInfo::send_response() {
}
char* buf = data.c_str();
- ldout(s->cct, 20) << __func__ << "(): read data: " << buf << dendl;
+ ldpp_dout(this, 20) << __func__ << "(): read data: " << buf << dendl;
JSONParser p;
r = p.parse(buf, data.length());
if (r < 0) {
- ldout(s->cct, 0) << "ERROR: failed to parse JSON" << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to parse JSON" << dendl;
op_ret = r;
return;
}
try {
decode_json_obj(updated_shards, &p);
} catch (JSONDecoder::err& err) {
- ldout(s->cct, 0) << "ERROR: failed to decode JSON" << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to decode JSON" << dendl;
op_ret = -EINVAL;
return;
}
if (store->ctx()->_conf->subsys.should_gather<ceph_subsys_rgw, 20>()) {
for (map<int, set<string> >::iterator iter = updated_shards.begin(); iter != updated_shards.end(); ++iter) {
- ldout(s->cct, 20) << __func__ << "(): updated shard=" << iter->first << dendl;
+ ldpp_dout(this, 20) << __func__ << "(): updated shard=" << iter->first << dendl;
set<string>& keys = iter->second;
for (set<string>::iterator kiter = keys.begin(); kiter != keys.end(); ++kiter) {
- ldout(s->cct, 20) << __func__ << "(): modified key=" << *kiter << dendl;
+ ldpp_dout(this, 20) << __func__ << "(): modified key=" << *kiter << dendl;
}
}
}
if (s->info.args.exists("start-time") ||
s->info.args.exists("end-time")) {
- dout(5) << "start-time and end-time are no longer accepted" << dendl;
+ ldpp_dout(this, 5) << "start-time and end-time are no longer accepted" << dendl;
op_ret = -EINVAL;
}
if (s->info.args.exists("start-marker")) {
- dout(5) << "start-marker is no longer accepted" << dendl;
+ ldpp_dout(this, 5) << "start-marker is no longer accepted" << dendl;
op_ret = -EINVAL;
}
if (!s->info.args.exists("marker")) {
marker = s->info.args.get("end-marker");
} else {
- dout(5) << "end-marker and marker cannot both be provided" << dendl;
+ ldpp_dout(this, 5) << "end-marker and marker cannot both be provided" << dendl;
op_ret = -EINVAL;
}
}
shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err);
if (!err.empty()) {
- dout(5) << "Error parsing shard_id " << shard << dendl;
+ ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl;
op_ret = -EINVAL;
return;
}
return;
}
- op_ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados->trim_entries(shard_id, marker);
+ op_ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados->trim_entries(this, shard_id, marker);
}
// not in header to avoid pulling in rgw_sync.h
{
auto sync = static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_meta_sync_manager();
if (sync == nullptr) {
- ldout(s->cct, 1) << "no sync manager" << dendl;
+ ldpp_dout(this, 1) << "no sync manager" << dendl;
op_ret = -ENOENT;
return;
}
- op_ret = sync->read_sync_status(&status);
+ op_ret = sync->read_sync_status(this, &status);
}
void RGWOp_MDLog_Status::send_response()
key = source_key;
}
if (key.empty()) {
- ldpp_dout(s, 4) << "no 'bucket' provided" << dendl;
+ ldpp_dout(this, 4) << "no 'bucket' provided" << dendl;
op_ret = -EINVAL;
return;
}
int shard_id{-1}; // unused
op_ret = rgw_bucket_parse_bucket_key(s->cct, key, &b, &shard_id);
if (op_ret < 0) {
- ldpp_dout(s, 4) << "invalid 'bucket' provided" << dendl;
+ ldpp_dout(this, 4) << "invalid 'bucket' provided" << dendl;
op_ret = -EINVAL;
return;
}
std::unique_ptr<rgw::sal::Bucket> bucket;
op_ret = store->get_bucket(s, nullptr, b, &bucket, y);
if (op_ret < 0) {
- ldpp_dout(s, 4) << "failed to read bucket info: " << cpp_strerror(op_ret) << dendl;
+ ldpp_dout(this, 4) << "failed to read bucket info: " << cpp_strerror(op_ret) << dendl;
return;
}
} else {
op_ret = rgw_bucket_parse_bucket_key(s->cct, source_key, &source_bucket, nullptr);
if (op_ret < 0) {
- ldpp_dout(s, 4) << "invalid 'source-bucket' provided (key=" << source_key << ")" << dendl;
+ ldpp_dout(this, 4) << "invalid 'source-bucket' provided (key=" << source_key << ")" << dendl;
return;
}
}
pipe.dest.zone = local_zone_id;
pipe.dest.bucket = bucket->get_key();
- ldout(s->cct, 20) << "RGWOp_BILog_Status::execute(optional_yield y): getting sync status for pipe=" << pipe << dendl;
+ ldpp_dout(this, 20) << "RGWOp_BILog_Status::execute(optional_yield y): getting sync status for pipe=" << pipe << dendl;
op_ret = rgw_bucket_sync_status(this, static_cast<rgw::sal::RadosStore*>(store), pipe, bucket->get_info(), nullptr, &status);
if (op_ret < 0) {
- lderr(s->cct) << "ERROR: rgw_bucket_sync_status() on pipe=" << pipe << " returned ret=" << op_ret << dendl;
+ ldpp_dout(this, -1) << "ERROR: rgw_bucket_sync_status() on pipe=" << pipe << " returned ret=" << op_ret << dendl;
}
return;
}
RGWBucketSyncPolicyHandlerRef source_handler;
op_ret = store->get_sync_policy_handler(s, source_zone_id, source_bucket, &source_handler, y);
if (op_ret < 0) {
- lderr(s->cct) << "could not get bucket sync policy handler (r=" << op_ret << ")" << dendl;
+ ldpp_dout(this, -1) << "could not get bucket sync policy handler (r=" << op_ret << ")" << dendl;
return;
}
for (auto& entry : local_dests) {
auto pipe = entry.second;
- ldout(s->cct, 20) << "RGWOp_BILog_Status::execute(optional_yield y): getting sync status for pipe=" << pipe << dendl;
+ ldpp_dout(this, 20) << "RGWOp_BILog_Status::execute(optional_yield y): getting sync status for pipe=" << pipe << dendl;
RGWBucketInfo *pinfo = &bucket->get_info();
std::optional<RGWBucketInfo> opt_dest_info;
if (!pipe.dest.bucket) {
/* Uh oh, something went wrong */
- ldout(s->cct, 20) << "ERROR: RGWOp_BILog_Status::execute(optional_yield y): BUG: pipe.dest.bucket was not initialized" << pipe << dendl;
+ ldpp_dout(this, 20) << "ERROR: RGWOp_BILog_Status::execute(optional_yield y): BUG: pipe.dest.bucket was not initialized" << pipe << dendl;
op_ret = -EIO;
return;
}
std::unique_ptr<rgw::sal::Bucket> dest_bucket;
op_ret = store->get_bucket(s, nullptr, *pipe.dest.bucket, &dest_bucket, y);
if (op_ret < 0) {
- ldpp_dout(s, 4) << "failed to read target bucket info (bucket=: " << cpp_strerror(op_ret) << dendl;
+ ldpp_dout(this, 4) << "failed to read target bucket info (bucket=: " << cpp_strerror(op_ret) << dendl;
return;
}
int r = rgw_bucket_sync_status(this, static_cast<rgw::sal::RadosStore*>(store), pipe, *pinfo, &bucket->get_info(), ¤t_status);
if (r < 0) {
- lderr(s->cct) << "ERROR: rgw_bucket_sync_status() on pipe=" << pipe << " returned ret=" << r << dendl;
+ ldpp_dout(this, -1) << "ERROR: rgw_bucket_sync_status() on pipe=" << pipe << " returned ret=" << r << dendl;
op_ret = r;
return;
}
if (current_status.size() !=
status.size()) {
op_ret = -EINVAL;
- lderr(s->cct) << "ERROR: different number of shards for sync status of buckets syncing from the same source: status.size()= " << status.size() << " current_status.size()=" << current_status.size() << dendl;
+ ldpp_dout(this, -1) << "ERROR: different number of shards for sync status of buckets syncing from the same source: status.size()= " << status.size() << " current_status.size()=" << current_status.size() << dendl;
return;
}
auto m = status.begin();
const auto source_zone = s->info.args.get("source-zone");
auto sync = store->get_data_sync_manager(source_zone);
if (sync == nullptr) {
- ldout(s->cct, 1) << "no sync manager for source-zone " << source_zone << dendl;
+ ldpp_dout(this, 1) << "no sync manager for source-zone " << source_zone << dendl;
op_ret = -ENOENT;
return;
}
- op_ret = sync->read_sync_status(&status);
+ op_ret = sync->read_sync_status(this, &status);
}
void RGWOp_DATALog_Status::send_response()
void RGWOp_Metadata_List::execute(optional_yield y) {
string marker;
- ldout(s->cct, 16) << __func__
+ ldpp_dout(this, 16) << __func__
<< " raw marker " << s->info.args.get("marker")
<< dendl;
if (!marker.empty()) {
marker = rgw::from_base64(marker);
}
- ldout(s->cct, 16) << __func__
+ ldpp_dout(this, 16) << __func__
<< " marker " << marker << dendl;
} catch (...) {
marker = std::string("");
string err;
max_entries = (unsigned)strict_strtol(max_entries_str.c_str(), 10, &err);
if (!err.empty()) {
- dout(5) << "Error parsing max-entries " << max_entries_str << dendl;
+ ldpp_dout(this, 5) << "Error parsing max-entries " << max_entries_str << dendl;
op_ret = -EINVAL;
return;
}
marker = "3:bf885d8f:root::sorry_janefonda_665:head";
*/
- op_ret = store->meta_list_keys_init(metadata_key, marker, &handle);
+ op_ret = store->meta_list_keys_init(this, metadata_key, marker, &handle);
if (op_ret < 0) {
- dout(5) << "ERROR: can't get key: " << cpp_strerror(op_ret) << dendl;
+ ldpp_dout(this, 5) << "ERROR: can't get key: " << cpp_strerror(op_ret) << dendl;
return;
}
left = (max_entries_specified ? max_entries - count : max);
op_ret = store->meta_list_keys_next(handle, left, keys, &truncated);
if (op_ret < 0) {
- dout(5) << "ERROR: lists_keys_next(): " << cpp_strerror(op_ret)
+ ldpp_dout(this, 5) << "ERROR: lists_keys_next(): " << cpp_strerror(op_ret)
<< dendl;
return;
}
}
read_len = recv_body(s, data, cl);
if (cl != (size_t)read_len) {
- dout(10) << "recv_body incomplete" << dendl;
+ ldpp_dout(this, 10) << "recv_body incomplete" << dendl;
}
if (read_len < 0) {
free(data);
provider_arn = s->info.args.get("OpenIDConnectProviderArn");
if (provider_arn.empty()) {
- ldout(s->cct, 20) << "ERROR: Provider ARN is empty"<< dendl;
+ ldpp_dout(this, 20) << "ERROR: Provider ARN is empty"<< dendl;
return -EINVAL;
}
}
if (provider_url.empty() || thumbprints.empty()) {
- ldout(s->cct, 20) << "ERROR: one of url or thumbprints is empty" << dendl;
+ ldpp_dout(this, 20) << "ERROR: one of url or thumbprints is empty" << dendl;
return -EINVAL;
}
std::unique_ptr<rgw::sal::RGWOIDCProvider> provider = store->get_oidc_provider();
provider->set_arn(provider_arn);
provider->set_tenant(s->user->get_tenant());
- op_ret = provider->delete_obj(this, y);
+ op_ret = provider->delete_obj(s, y);
if (op_ret < 0 && op_ret != -ENOENT && op_ret != -EINVAL) {
op_ret = ERR_INTERNAL_ERROR;
int get_params() override {
topic_name = s->info.args.get("Name");
if (topic_name.empty()) {
- ldout(s->cct, 1) << "CreateTopic Action 'Name' argument is missing" << dendl;
+ ldpp_dout(this, 1) << "CreateTopic Action 'Name' argument is missing" << dendl;
return -EINVAL;
}
if (!dest.push_endpoint.empty() && dest.persistent) {
const auto ret = rgw::notify::add_persistent_topic(topic_name, s->yield);
if (ret < 0) {
- ldout(s->cct, 1) << "CreateTopic Action failed to create queue for persistent topics. error:" << ret << dendl;
+ ldpp_dout(this, 1) << "CreateTopic Action failed to create queue for persistent topics. error:" << ret << dendl;
return ret;
}
}
const auto topic_arn = rgw::ARN::parse((s->info.args.get("TopicArn")));
if (!topic_arn || topic_arn->resource.empty()) {
- ldout(s->cct, 1) << "GetTopic Action 'TopicArn' argument is missing or invalid" << dendl;
+ ldpp_dout(this, 1) << "GetTopic Action 'TopicArn' argument is missing or invalid" << dendl;
return -EINVAL;
}
const auto topic_arn = rgw::ARN::parse((s->info.args.get("TopicArn")));
if (!topic_arn || topic_arn->resource.empty()) {
- ldout(s->cct, 1) << "GetTopicAttribute Action 'TopicArn' argument is missing or invalid" << dendl;
+ ldpp_dout(this, 1) << "GetTopicAttribute Action 'TopicArn' argument is missing or invalid" << dendl;
return -EINVAL;
}
const auto topic_arn = rgw::ARN::parse((s->info.args.get("TopicArn")));
if (!topic_arn || topic_arn->resource.empty()) {
- ldout(s->cct, 1) << "DeleteTopic Action 'TopicArn' argument is missing or invalid" << dendl;
+ ldpp_dout(this, 1) << "DeleteTopic Action 'TopicArn' argument is missing or invalid" << dendl;
return -EINVAL;
}
return 0;
}
if (ret < 0) {
- ldout(s->cct, 1) << "DeleteTopic Action failed to remove queue for persistent topics. error:" << ret << dendl;
+ ldpp_dout(this, 1) << "DeleteTopic Action failed to remove queue for persistent topics. error:" << ret << dendl;
return ret;
}
void RGWHandler_REST_PSTopic_AWS::rgw_topic_parse_input() {
if (post_body.size() > 0) {
- ldout(s->cct, 10) << "Content of POST: " << post_body << dendl;
+ ldpp_dout(s, 10) << "Content of POST: " << post_body << dendl;
if (post_body.find("Action") != string::npos) {
const boost::char_separator<char> sep("&");
std::tie(r, data) = read_all_input(s, max_size, false);
if (r < 0) {
- ldout(s->cct, 1) << "failed to read XML payload" << dendl;
+ ldpp_dout(this, 1) << "failed to read XML payload" << dendl;
return r;
}
if (data.length() == 0) {
- ldout(s->cct, 1) << "XML payload missing" << dendl;
+ ldpp_dout(this, 1) << "XML payload missing" << dendl;
return -EINVAL;
}
RGWXMLDecoder::XMLParser parser;
if (!parser.init()){
- ldout(s->cct, 1) << "failed to initialize XML parser" << dendl;
+ ldpp_dout(this, 1) << "failed to initialize XML parser" << dendl;
return -EINVAL;
}
if (!parser.parse(data.c_str(), data.length(), 1)) {
- ldout(s->cct, 1) << "failed to parse XML payload" << dendl;
+ ldpp_dout(this, 1) << "failed to parse XML payload" << dendl;
return -ERR_MALFORMED_XML;
}
try {
// NotificationConfigurations is mandatory
RGWXMLDecoder::decode_xml("NotificationConfiguration", configurations, &parser, true);
} catch (RGWXMLDecoder::err& err) {
- ldout(s->cct, 1) << "failed to parse XML payload. error: " << err << dendl;
+ ldpp_dout(this, 1) << "failed to parse XML payload. error: " << err << dendl;
return -ERR_MALFORMED_XML;
}
return 0;
bool exists;
const auto no_value = s->info.args.get("notification", &exists);
if (!exists) {
- ldout(s->cct, 1) << "missing required param 'notification'" << dendl;
+ ldpp_dout(this, 1) << "missing required param 'notification'" << dendl;
return -EINVAL;
}
if (no_value.length() > 0) {
- ldout(s->cct, 1) << "param 'notification' should not have any value" << dendl;
+ ldpp_dout(this, 1) << "param 'notification' should not have any value" << dendl;
return -EINVAL;
}
if (s->bucket_name.empty()) {
- ldout(s->cct, 1) << "request must be on a bucket" << dendl;
+ ldpp_dout(this, 1) << "request must be on a bucket" << dendl;
return -EINVAL;
}
bucket_name = s->bucket_name;
for (const auto& c : configurations.list) {
const auto& notif_name = c.id;
if (notif_name.empty()) {
- ldout(s->cct, 1) << "missing notification id" << dendl;
+ ldpp_dout(this, 1) << "missing notification id" << dendl;
op_ret = -EINVAL;
return;
}
if (c.topic_arn.empty()) {
- ldout(s->cct, 1) << "missing topic ARN in notification: '" << notif_name << "'" << dendl;
+ ldpp_dout(this, 1) << "missing topic ARN in notification: '" << notif_name << "'" << dendl;
op_ret = -EINVAL;
return;
}
const auto arn = rgw::ARN::parse(c.topic_arn);
if (!arn || arn->resource.empty()) {
- ldout(s->cct, 1) << "topic ARN has invalid format: '" << c.topic_arn << "' in notification: '" << notif_name << "'" << dendl;
+ ldpp_dout(this, 1) << "topic ARN has invalid format: '" << c.topic_arn << "' in notification: '" << notif_name << "'" << dendl;
op_ret = -EINVAL;
return;
}
if (std::find(c.events.begin(), c.events.end(), rgw::notify::UnknownEvent) != c.events.end()) {
- ldout(s->cct, 1) << "unknown event type in notification: '" << notif_name << "'" << dendl;
+ ldpp_dout(this, 1) << "unknown event type in notification: '" << notif_name << "'" << dendl;
op_ret = -EINVAL;
return;
}
rgw_pubsub_topic topic_info;
op_ret = ps->get_topic(topic_name, &topic_info);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl;
return;
}
// make sure that full topic configuration match
// generate the internal topic. destination is stored here for the "push-only" case
// when no subscription exists
// ARN is cached to make the "GET" method faster
- op_ret = ps->create_topic(unique_topic_name, topic_info.dest, topic_info.arn, topic_info.opaque_data, y);
+ op_ret = ps->create_topic(this, unique_topic_name, topic_info.dest, topic_info.arn, topic_info.opaque_data, y);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to auto-generate unique topic '" << unique_topic_name <<
+ ldpp_dout(this, 1) << "failed to auto-generate unique topic '" << unique_topic_name <<
"', ret=" << op_ret << dendl;
return;
}
- ldout(s->cct, 20) << "successfully auto-generated unique topic '" << unique_topic_name << "'" << dendl;
+ ldpp_dout(this, 20) << "successfully auto-generated unique topic '" << unique_topic_name << "'" << dendl;
// generate the notification
rgw::notify::EventTypeList events;
- op_ret = b->create_notification(unique_topic_name, c.events, std::make_optional(c.filter), notif_name, y);
+ op_ret = b->create_notification(this, unique_topic_name, c.events, std::make_optional(c.filter), notif_name, y);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to auto-generate notification for unique topic '" << unique_topic_name <<
+ ldpp_dout(this, 1) << "failed to auto-generate notification for unique topic '" << unique_topic_name <<
"', ret=" << op_ret << dendl;
// rollback generated topic (ignore return value)
- ps->remove_topic(unique_topic_name, y);
+ ps->remove_topic(this, unique_topic_name, y);
return;
}
- ldout(s->cct, 20) << "successfully auto-generated notification for unique topic '" << unique_topic_name << "'" << dendl;
+ ldpp_dout(this, 20) << "successfully auto-generated notification for unique topic '" << unique_topic_name << "'" << dendl;
if (!push_only) {
// generate the subscription with destination information from the original topic
dest.bucket_name = data_bucket_prefix + s->owner.get_id().to_str() + "-" + unique_topic_name;
dest.oid_prefix = data_oid_prefix + notif_name + "/";
auto sub = ps->get_sub(notif_name);
- op_ret = sub->subscribe(unique_topic_name, dest, y, notif_name);
+ op_ret = sub->subscribe(this, unique_topic_name, dest, y, notif_name);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to auto-generate subscription '" << notif_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to auto-generate subscription '" << notif_name << "', ret=" << op_ret << dendl;
// rollback generated notification (ignore return value)
- b->remove_notification(unique_topic_name, y);
+ b->remove_notification(this, unique_topic_name, y);
// rollback generated topic (ignore return value)
- ps->remove_topic(unique_topic_name, y);
+ ps->remove_topic(this, unique_topic_name, y);
return;
}
- ldout(s->cct, 20) << "successfully auto-generated subscription '" << notif_name << "'" << dendl;
+ ldpp_dout(this, 20) << "successfully auto-generated subscription '" << notif_name << "'" << dendl;
}
}
}
bool exists;
notif_name = s->info.args.get("notification", &exists);
if (!exists) {
- ldout(s->cct, 1) << "missing required param 'notification'" << dendl;
+ ldpp_dout(this, 1) << "missing required param 'notification'" << dendl;
return -EINVAL;
}
if (s->bucket_name.empty()) {
- ldout(s->cct, 1) << "request must be on a bucket" << dendl;
+ ldpp_dout(this, 1) << "request must be on a bucket" << dendl;
return -EINVAL;
}
bucket_name = s->bucket_name;
}
void remove_notification_by_topic(const std::string& topic_name, const RGWPubSub::BucketRef& b, optional_yield y) {
- op_ret = b->remove_notification(topic_name, y);
+ op_ret = b->remove_notification(this, topic_name, y);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to remove notification of topic '" << topic_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to remove notification of topic '" << topic_name << "', ret=" << op_ret << dendl;
}
- op_ret = ps->remove_topic(topic_name, y);
+ op_ret = ps->remove_topic(this, topic_name, y);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to remove auto-generated topic '" << topic_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to remove auto-generated topic '" << topic_name << "', ret=" << op_ret << dendl;
}
}
rgw_pubsub_bucket_topics bucket_topics;
op_ret = b->get_topics(&bucket_topics);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to get list of topics from bucket '" << bucket_info.bucket.name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to get list of topics from bucket '" << bucket_info.bucket.name << "', ret=" << op_ret << dendl;
return;
}
// remove the auto generated subscription according to notification name (if exist)
const auto unique_topic_name = unique_topic->get().topic.name;
auto sub = ps->get_sub(notif_name);
- op_ret = sub->unsubscribe(unique_topic_name, y);
+ op_ret = sub->unsubscribe(this, unique_topic_name, y);
if (op_ret < 0 && op_ret != -ENOENT) {
- ldout(s->cct, 1) << "failed to remove auto-generated subscription '" << notif_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to remove auto-generated subscription '" << notif_name << "', ret=" << op_ret << dendl;
return;
}
remove_notification_by_topic(unique_topic_name, b, y);
return;
}
// notification to be removed is not found - considered success
- ldout(s->cct, 20) << "notification '" << notif_name << "' already removed" << dendl;
+ ldpp_dout(this, 20) << "notification '" << notif_name << "' already removed" << dendl;
return;
}
rgw_pubsub_sub_config sub_conf;
op_ret = sub->get_conf(&sub_conf);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to get subscription '" << topic_sub_name << "' info, ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to get subscription '" << topic_sub_name << "' info, ret=" << op_ret << dendl;
return;
}
if (!sub_conf.s3_id.empty()) {
// S3 notification, has autogenerated subscription
const auto& sub_topic_name = sub_conf.topic;
- op_ret = sub->unsubscribe(sub_topic_name, y);
+ op_ret = sub->unsubscribe(this, sub_topic_name, y);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to remove auto-generated subscription '" << topic_sub_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to remove auto-generated subscription '" << topic_sub_name << "', ret=" << op_ret << dendl;
return;
}
}
bool exists;
notif_name = s->info.args.get("notification", &exists);
if (!exists) {
- ldout(s->cct, 1) << "missing required param 'notification'" << dendl;
+ ldpp_dout(this, 1) << "missing required param 'notification'" << dendl;
return -EINVAL;
}
if (s->bucket_name.empty()) {
- ldout(s->cct, 1) << "request must be on a bucket" << dendl;
+ ldpp_dout(this, 1) << "request must be on a bucket" << dendl;
return -EINVAL;
}
bucket_name = s->bucket_name;
rgw_pubsub_bucket_topics bucket_topics;
op_ret = b->get_topics(&bucket_topics);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to get list of topics from bucket '" << bucket_info.bucket.name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to get list of topics from bucket '" << bucket_info.bucket.name << "', ret=" << op_ret << dendl;
return;
}
if (!notif_name.empty()) {
return;
}
op_ret = -ENOENT;
- ldout(s->cct, 1) << "failed to get notification info for '" << notif_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to get notification info for '" << notif_name << "', ret=" << op_ret << dendl;
return;
}
// loop through all topics of the bucket
}
ps.emplace(static_cast<rgw::sal::RadosStore*>(store), s->owner.get_id().tenant);
- op_ret = ps->create_topic(topic_name, dest, topic_arn, opaque_data, y);
+ op_ret = ps->create_topic(this, topic_name, dest, topic_arn, opaque_data, y);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to create topic '" << topic_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to create topic '" << topic_name << "', ret=" << op_ret << dendl;
return;
}
- ldout(s->cct, 20) << "successfully created topic '" << topic_name << "'" << dendl;
+ ldpp_dout(this, 20) << "successfully created topic '" << topic_name << "'" << dendl;
}
void RGWPSListTopicsOp::execute(optional_yield y) {
// if there are no topics it is not considered an error
op_ret = op_ret == -ENOENT ? 0 : op_ret;
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to get topics, ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to get topics, ret=" << op_ret << dendl;
return;
}
if (topics_has_endpoint_secret(result) && !rgw_transport_is_secure(s->cct, *(s->info.env))) {
- ldout(s->cct, 1) << "topics contain secret and cannot be sent over insecure transport" << dendl;
+ ldpp_dout(this, 1) << "topics contain secret and cannot be sent over insecure transport" << dendl;
op_ret = -EPERM;
return;
}
- ldout(s->cct, 20) << "successfully got topics" << dendl;
+ ldpp_dout(this, 20) << "successfully got topics" << dendl;
}
void RGWPSGetTopicOp::execute(optional_yield y) {
ps.emplace(static_cast<rgw::sal::RadosStore*>(store), s->owner.get_id().tenant);
op_ret = ps->get_topic(topic_name, &result);
if (topic_has_endpoint_secret(result) && !rgw_transport_is_secure(s->cct, *(s->info.env))) {
- ldout(s->cct, 1) << "topic '" << topic_name << "' contain secret and cannot be sent over insecure transport" << dendl;
+ ldpp_dout(this, 1) << "topic '" << topic_name << "' contain secret and cannot be sent over insecure transport" << dendl;
op_ret = -EPERM;
return;
}
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl;
return;
}
- ldout(s->cct, 1) << "successfully got topic '" << topic_name << "'" << dendl;
+ ldpp_dout(this, 1) << "successfully got topic '" << topic_name << "'" << dendl;
}
void RGWPSDeleteTopicOp::execute(optional_yield y) {
return;
}
ps.emplace(static_cast<rgw::sal::RadosStore*>(store), s->owner.get_id().tenant);
- op_ret = ps->remove_topic(topic_name, y);
+ op_ret = ps->remove_topic(this, topic_name, y);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to remove topic '" << topic_name << ", ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to remove topic '" << topic_name << "', ret=" << op_ret << dendl;
return;
}
- ldout(s->cct, 1) << "successfully removed topic '" << topic_name << "'" << dendl;
+ ldpp_dout(this, 1) << "successfully removed topic '" << topic_name << "'" << dendl;
}
void RGWPSCreateSubOp::execute(optional_yield y) {
}
ps.emplace(static_cast<rgw::sal::RadosStore*>(store), s->owner.get_id().tenant);
auto sub = ps->get_sub(sub_name);
- op_ret = sub->subscribe(topic_name, dest, y);
+ op_ret = sub->subscribe(this, topic_name, dest, y);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to create subscription '" << sub_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to create subscription '" << sub_name << "', ret=" << op_ret << dendl;
return;
}
- ldout(s->cct, 20) << "successfully created subscription '" << sub_name << "'" << dendl;
+ ldpp_dout(this, 20) << "successfully created subscription '" << sub_name << "'" << dendl;
}
void RGWPSGetSubOp::execute(optional_yield y) {
auto sub = ps->get_sub(sub_name);
op_ret = sub->get_conf(&result);
if (subscription_has_endpoint_secret(result) && !rgw_transport_is_secure(s->cct, *(s->info.env))) {
- ldout(s->cct, 1) << "subscription '" << sub_name << "' contain secret and cannot be sent over insecure transport" << dendl;
+ ldpp_dout(this, 1) << "subscription '" << sub_name << "' contain secret and cannot be sent over insecure transport" << dendl;
op_ret = -EPERM;
return;
}
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to get subscription '" << sub_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to get subscription '" << sub_name << "', ret=" << op_ret << dendl;
return;
}
- ldout(s->cct, 20) << "successfully got subscription '" << sub_name << "'" << dendl;
+ ldpp_dout(this, 20) << "successfully got subscription '" << sub_name << "'" << dendl;
}
void RGWPSDeleteSubOp::execute(optional_yield y) {
}
ps.emplace(static_cast<rgw::sal::RadosStore*>(store), s->owner.get_id().tenant);
auto sub = ps->get_sub(sub_name);
- op_ret = sub->unsubscribe(topic_name, y);
+ op_ret = sub->unsubscribe(this, topic_name, y);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to remove subscription '" << sub_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to remove subscription '" << sub_name << "', ret=" << op_ret << dendl;
return;
}
- ldout(s->cct, 20) << "successfully removed subscription '" << sub_name << "'" << dendl;
+ ldpp_dout(this, 20) << "successfully removed subscription '" << sub_name << "'" << dendl;
}
void RGWPSAckSubEventOp::execute(optional_yield y) {
sub = ps->get_sub_with_events(sub_name);
if (!sub) {
op_ret = -ENOENT;
- ldout(s->cct, 1) << "failed to get subscription '" << sub_name << "' for events, ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to get subscription '" << sub_name << "' for events, ret=" << op_ret << dendl;
return;
}
op_ret = sub->list_events(s, marker, max_entries);
std::unique_ptr<rgw::sal::Bucket> bucket;
ret = store->get_bucket(this, user.get(), s->owner.get_id().tenant, bucket_name, &bucket, y);
if (ret < 0) {
- ldout(s->cct, 1) << "failed to get bucket info, cannot verify ownership" << dendl;
+ ldpp_dout(this, 1) << "failed to get bucket info, cannot verify ownership" << dendl;
return ret;
}
bucket_info = bucket->get_info();
if (bucket_info.owner != s->owner.get_id()) {
- ldout(s->cct, 1) << "user doesn't own bucket, not allowed to create notification" << dendl;
+ ldpp_dout(this, 1) << "user doesn't own bucket, not allowed to create notification" << dendl;
return -EPERM;
}
return 0;
bucket_info = bucket->get_info();
if (bucket_info.owner != s->owner.get_id()) {
- ldout(s->cct, 1) << "user doesn't own bucket, cannot remove notification" << dendl;
+ ldpp_dout(this, 1) << "user doesn't own bucket, cannot remove notification" << dendl;
return -EPERM;
}
return 0;
bucket_info = bucket->get_info();
if (bucket_info.owner != s->owner.get_id()) {
- ldout(s->cct, 1) << "user doesn't own bucket, cannot get notification list" << dendl;
+ ldpp_dout(this, 1) << "user doesn't own bucket, cannot get notification list" << dendl;
return -EPERM;
}
if (op_ret < 0) {
if (!s->err.message.empty()) {
- ldout(s->cct, 4) << "Request failed with " << op_ret
+ ldpp_dout(this, 4) << "Request failed with " << op_ret
<< ": " << s->err.message << dendl;
}
end_header(s);
period.set_id(period_id);
period.set_epoch(epoch);
- op_ret = period.init(store->ctx(), static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm_id, y, realm_name);
+ op_ret = period.init(this, store->ctx(), static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm_id, y, realm_name);
if (op_ret < 0)
- ldout(store->ctx(), 5) << "failed to read period" << dendl;
+ ldpp_dout(this, 5) << "failed to read period" << dendl;
}
// POST /admin/realm/period
auto cct = store->ctx();
// initialize the period without reading from rados
- period.init(cct, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, y, false);
+ period.init(this, cct, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, y, false);
// decode the period from input
const auto max_size = cct->_conf->rgw_max_put_param_size;
bool empty;
op_ret = get_json_input(cct, s, period, max_size, &empty);
if (op_ret < 0) {
- lderr(cct) << "failed to decode period" << dendl;
+ ldpp_dout(this, -1) << "failed to decode period" << dendl;
return;
}
// period that we haven't restarted with yet. we also don't want to modify
// the objects in use by RGWRados
RGWRealm realm(period.get_realm());
- op_ret = realm.init(cct, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, y);
+ op_ret = realm.init(this, cct, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, y);
if (op_ret < 0) {
- lderr(cct) << "failed to read current realm: "
+ ldpp_dout(this, -1) << "failed to read current realm: "
<< cpp_strerror(-op_ret) << dendl;
return;
}
RGWPeriod current_period;
- op_ret = current_period.init(cct, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm.get_id(), y);
+ op_ret = current_period.init(this, cct, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm.get_id(), y);
if (op_ret < 0) {
- lderr(cct) << "failed to read current period: "
+ ldpp_dout(this, -1) << "failed to read current period: "
<< cpp_strerror(-op_ret) << dendl;
return;
}
if (period.get_id().empty()) {
op_ret = period.commit(this, store, realm, current_period, error_stream, y);
if (op_ret < 0) {
- lderr(cct) << "master zone failed to commit period" << dendl;
+ ldpp_dout(this, -1) << "master zone failed to commit period" << dendl;
}
return;
}
// if it's not period commit, nobody is allowed to push to the master zone
if (period.get_master_zone() == store->get_zone()->get_params().get_id()) {
- ldout(cct, 10) << "master zone rejecting period id="
+ ldpp_dout(this, 10) << "master zone rejecting period id="
<< period.get_id() << " epoch=" << period.get_epoch() << dendl;
op_ret = -EINVAL; // XXX: error code
return;
}
// write the period to rados
- op_ret = period.store_info(false, y);
+ op_ret = period.store_info(this, false, y);
if (op_ret < 0) {
- lderr(cct) << "failed to store period " << period.get_id() << dendl;
+ ldpp_dout(this, -1) << "failed to store period " << period.get_id() << dendl;
return;
}
// set as latest epoch
- op_ret = period.update_latest_epoch(period.get_epoch(), y);
+ op_ret = period.update_latest_epoch(this, period.get_epoch(), y);
if (op_ret == -EEXIST) {
// already have this epoch (or a more recent one)
- ldout(cct, 4) << "already have epoch >= " << period.get_epoch()
+ ldpp_dout(this, 4) << "already have epoch >= " << period.get_epoch()
<< " for period " << period.get_id() << dendl;
op_ret = 0;
return;
}
if (op_ret < 0) {
- lderr(cct) << "failed to set latest epoch" << dendl;
+ ldpp_dout(this, -1) << "failed to set latest epoch" << dendl;
return;
}
auto current_epoch = current_period.get_realm_epoch();
// discard periods in the past
if (period.get_realm_epoch() < current_epoch) {
- ldout(cct, 10) << "discarding period " << period.get_id()
+ ldpp_dout(this, 10) << "discarding period " << period.get_id()
<< " with realm epoch " << period.get_realm_epoch()
<< " older than current epoch " << current_epoch << dendl;
// return success to ack that we have this period
}
// discard periods too far in the future
if (period.get_realm_epoch() > current_epoch + PERIOD_HISTORY_FETCH_MAX) {
- lderr(cct) << "discarding period " << period.get_id()
+ ldpp_dout(this, -1) << "discarding period " << period.get_id()
<< " with realm epoch " << period.get_realm_epoch() << " too far in "
"the future from current epoch " << current_epoch << dendl;
op_ret = -ENOENT; // XXX: error code
return;
}
// attach a copy of the period into the period history
- auto cursor = period_history->attach(RGWPeriod{period}, y);
+ auto cursor = period_history->attach(this, RGWPeriod{period}, y);
if (!cursor) {
// we're missing some history between the new period and current_period
op_ret = cursor.get_error();
- lderr(cct) << "failed to collect the periods between current period "
+ ldpp_dout(this, -1) << "failed to collect the periods between current period "
<< current_period.get_id() << " (realm epoch " << current_epoch
<< ") and the new period " << period.get_id()
<< " (realm epoch " << period.get_realm_epoch()
}
if (cursor.has_next()) {
// don't switch if we have a newer period in our history
- ldout(cct, 4) << "attached period " << period.get_id()
+ ldpp_dout(this, 4) << "attached period " << period.get_id()
<< " to history, but the history contains newer periods" << dendl;
return;
}
// set as current period
- op_ret = realm.set_current_period(period, y);
+ op_ret = realm.set_current_period(this, period, y);
if (op_ret < 0) {
- lderr(cct) << "failed to update realm's current period" << dendl;
+ ldpp_dout(this, -1) << "failed to update realm's current period" << dendl;
return;
}
- ldout(cct, 4) << "period " << period.get_id()
+ ldpp_dout(this, 4) << "period " << period.get_id()
<< " is newer than current period " << current_period.get_id()
<< ", updating realm's current period and notifying zone" << dendl;
- realm.notify_new_period(period, y);
+ realm.notify_new_period(this, period, y);
return;
}
// reflect the period into our local objects
- op_ret = period.reflect(y);
+ op_ret = period.reflect(this, y);
if (op_ret < 0) {
- lderr(cct) << "failed to update local objects: "
+ ldpp_dout(this, -1) << "failed to update local objects: "
<< cpp_strerror(-op_ret) << dendl;
return;
}
- ldout(cct, 4) << "period epoch " << period.get_epoch()
+ ldpp_dout(this, 4) << "period epoch " << period.get_epoch()
<< " is newer than current epoch " << current_period.get_epoch()
<< ", updating period's latest epoch and notifying zone" << dendl;
- realm.notify_new_period(period, y);
+ realm.notify_new_period(this, period, y);
// update the period history
period_history->insert(RGWPeriod{period});
}
// read realm
realm.reset(new RGWRealm(id, name));
- op_ret = realm->init(g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, y);
+ op_ret = realm->init(this, g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, y);
if (op_ret < 0)
- lderr(store->ctx()) << "failed to read realm id=" << id
+ ldpp_dout(this, -1) << "failed to read realm id=" << id
<< " name=" << name << dendl;
}
{
// read default realm
RGWRealm realm(store->ctx(), static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj);
- [[maybe_unused]] int ret = realm.read_default_id(default_id, y);
+ [[maybe_unused]] int ret = realm.read_default_id(this, default_id, y);
}
- op_ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->list_realms(realms);
+ op_ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->list_realms(this, realms);
if (op_ret < 0)
- lderr(store->ctx()) << "failed to list realms" << dendl;
+ ldpp_dout(this, -1) << "failed to list realms" << dendl;
}
void RGWOp_Realm_List::send_response()
max_session_duration = s->info.args.get("MaxSessionDuration");
if (role_name.empty() || trust_policy.empty()) {
- ldout(s->cct, 20) << "ERROR: one of role name or assume role policy document is empty"
+ ldpp_dout(this, 20) << "ERROR: one of role name or assume role policy document is empty"
<< dendl;
return -EINVAL;
}
const rgw::IAM::Policy p(s->cct, s->user->get_tenant(), bl);
}
catch (rgw::IAM::PolicyParseException& e) {
- ldout(s->cct, 20) << "failed to parse policy: " << e.what() << dendl;
+ ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << dendl;
return -ERR_MALFORMED_DOC;
}
role_name = s->info.args.get("RoleName");
if (role_name.empty()) {
- ldout(s->cct, 20) << "ERROR: Role name is empty"<< dendl;
+ ldpp_dout(this, 20) << "ERROR: Role name is empty"<< dendl;
return -EINVAL;
}
role_name = s->info.args.get("RoleName");
if (role_name.empty()) {
- ldout(s->cct, 20) << "ERROR: Role name is empty"<< dendl;
+ ldpp_dout(this, 20) << "ERROR: Role name is empty"<< dendl;
return -EINVAL;
}
trust_policy = s->info.args.get("PolicyDocument");
if (role_name.empty() || trust_policy.empty()) {
- ldout(s->cct, 20) << "ERROR: One of role name or trust policy is empty"<< dendl;
+ ldpp_dout(this, 20) << "ERROR: One of role name or trust policy is empty"<< dendl;
return -EINVAL;
}
JSONParser p;
if (!p.parse(trust_policy.c_str(), trust_policy.length())) {
- ldout(s->cct, 20) << "ERROR: failed to parse assume role policy doc" << dendl;
+ ldpp_dout(this, 20) << "ERROR: failed to parse assume role policy doc" << dendl;
return -ERR_MALFORMED_DOC;
}
perm_policy = s->info.args.get("PolicyDocument");
if (role_name.empty() || policy_name.empty() || perm_policy.empty()) {
- ldout(s->cct, 20) << "ERROR: One of role name, policy name or perm policy is empty"<< dendl;
+ ldpp_dout(this, 20) << "ERROR: One of role name, policy name or perm policy is empty"<< dendl;
return -EINVAL;
}
bufferlist bl = bufferlist::static_from_string(perm_policy);
const rgw::IAM::Policy p(s->cct, s->user->get_tenant(), bl);
}
catch (rgw::IAM::PolicyParseException& e) {
- ldout(s->cct, 20) << "failed to parse policy: " << e.what() << dendl;
+ ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << dendl;
return -ERR_MALFORMED_DOC;
}
return 0;
policy_name = s->info.args.get("PolicyName");
if (role_name.empty() || policy_name.empty()) {
- ldout(s->cct, 20) << "ERROR: One of role name or policy name is empty"<< dendl;
+ ldpp_dout(this, 20) << "ERROR: One of role name or policy name is empty"<< dendl;
return -EINVAL;
}
return 0;
role_name = s->info.args.get("RoleName");
if (role_name.empty()) {
- ldout(s->cct, 20) << "ERROR: Role name is empty"<< dendl;
+ ldpp_dout(this, 20) << "ERROR: Role name is empty"<< dendl;
return -EINVAL;
}
return 0;
policy_name = s->info.args.get("PolicyName");
if (role_name.empty() || policy_name.empty()) {
- ldout(s->cct, 20) << "ERROR: One of role name or policy name is empty"<< dendl;
+ ldpp_dout(this, 20) << "ERROR: One of role name or policy name is empty"<< dendl;
return -EINVAL;
}
return 0;
if (block_crypt != nullptr) {
auto f = std::make_unique<RGWGetObj_BlockDecrypt>(s->cct, cb, std::move(block_crypt));
if (manifest_bl != nullptr) {
- res = f->read_manifest(*manifest_bl);
+ res = f->read_manifest(this, *manifest_bl);
if (res == 0) {
*filter = std::move(f);
}
try {
tagset.decode(iter);
} catch (buffer::error& err) {
- ldout(s->cct,0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl;
+ ldpp_dout(this,0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl;
op_ret= -EIO;
return;
}
}
}
-int RGWPutBucketTags_ObjStore_S3::get_params(optional_yield y)
+int RGWPutBucketTags_ObjStore_S3::get_params(const DoutPrefixProvider *dpp, optional_yield y)
{
RGWXMLParser parser;
RGWXMLDecoder::decode_xml("Tagging", tagging, &parser);
} catch (RGWXMLDecoder::err& err) {
- ldout(s->cct, 5) << "Malformed tagging request: " << err << dendl;
+ ldpp_dout(dpp, 5) << "Malformed tagging request: " << err << dendl;
return -ERR_MALFORMED_XML;
}
return r;
obj_tags.encode(tags_bl);
- ldout(s->cct, 20) << "Read " << obj_tags.count() << "tags" << dendl;
+ ldpp_dout(dpp, 20) << "Read " << obj_tags.count() << " tags" << dendl;
// forward bucket tags requests to meta master zone
if (!store->is_meta_master()) {
bool enabled;
int r = rule.to_sync_policy_pipe(s, store, &pipe, &enabled);
if (r < 0) {
- ldout(s->cct, 5) << "NOTICE: failed to convert replication configuration into sync policy pipe (rule.id=" << rule.id << "): " << cpp_strerror(-r) << dendl;
+ ldpp_dout(s, 5) << "NOTICE: failed to convert replication configuration into sync policy pipe (rule.id=" << rule.id << "): " << cpp_strerror(-r) << dendl;
return r;
}
RGWXMLDecoder::decode_xml("ReplicationConfiguration", conf, &parser);
} catch (RGWXMLDecoder::err& err) {
- ldout(s->cct, 5) << "Malformed tagging request: " << err << dendl;
+ ldpp_dout(this, 5) << "Malformed replication configuration request: " << err << dendl;
return -ERR_MALFORMED_XML;
}
string err;
shard_id = strict_strtol(shard_id_str, 10, &err);
if (!err.empty()) {
- ldout(s->cct, 5) << "bad shard id specified: " << shard_id_str << dendl;
+ ldpp_dout(this, 5) << "bad shard id specified: " << shard_id_str << dendl;
return -EINVAL;
}
} else {
if (website_conf.is_redirect_all && website_conf.redirect_all.hostname.empty()) {
s->err.message = "A host name must be provided to redirect all requests (e.g. \"example.com\").";
- ldout(s->cct, 5) << s->err.message << dendl;
+ ldpp_dout(this, 5) << s->err.message << dendl;
return -EINVAL;
} else if (!website_conf.is_redirect_all && !website_conf.is_set_index_doc) {
s->err.message = "A value for IndexDocument Suffix must be provided if RedirectAllRequestsTo is empty";
- ldout(s->cct, 5) << s->err.message << dendl;
+ ldpp_dout(this, 5) << s->err.message << dendl;
return -EINVAL;
} else if (!website_conf.is_redirect_all && website_conf.is_set_index_doc &&
website_conf.index_doc_suffix.empty()) {
s->err.message = "The IndexDocument Suffix is not well formed";
- ldout(s->cct, 5) << s->err.message << dendl;
+ ldpp_dout(this, 5) << s->err.message << dendl;
return -EINVAL;
}
//RGWGetObj_BlockDecrypt* f = new RGWGetObj_BlockDecrypt(s->cct, cb, std::move(block_crypt));
if (f != nullptr) {
if (manifest_bl != nullptr) {
- res = f->read_manifest(*manifest_bl);
+ res = f->read_manifest(this, *manifest_bl);
if (res == 0) {
*filter = std::move(f);
}
int RGWPutCORS_ObjStore_S3::get_params(optional_yield y)
{
- RGWCORSXMLParser_S3 parser(s->cct);
+ RGWCORSXMLParser_S3 parser(this, s->cct);
RGWCORSConfiguration_S3 *cors_config;
const auto max_size = s->cct->_conf->rgw_max_put_param_size;
::encode_json("head", head_obj, &f);
::encode_json("manifest", *manifest, &f);
f.open_array_section("data_location");
- for (auto miter = manifest->obj_begin(); miter != manifest->obj_end(); ++miter) {
+ for (auto miter = manifest->obj_begin(this); miter != manifest->obj_end(this); ++miter) {
f.open_object_section("obj");
rgw_raw_obj raw_loc = miter.get_location().get_raw_obj(store);
uint64_t ofs = miter.get_ofs();
return -EACCES;
}
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.check_mfa(user->user_id, serial, pin, y);
+ int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.check_mfa(dpp, user->user_id, serial, pin, y);
if (ret < 0) {
ldpp_dout(dpp, 20) << "NOTICE: failed to check MFA, serial=" << serial << dendl;
return -EACCES;
return get_obj_op(false);
}
-int RGWHandler_REST_S3Website::serve_errordoc(int http_ret, const string& errordoc_key, optional_yield y) {
+int RGWHandler_REST_S3Website::serve_errordoc(const DoutPrefixProvider *dpp, int http_ret, const string& errordoc_key, optional_yield y) {
int ret = 0;
s->formatter->reset(); /* Try to throw it all away */
On success, it will return zero, and no further content should be sent to the socket
On failure, we need the double-error handler
*/
- new_err_no = RGWHandler_REST_S3Website::serve_errordoc(http_error_code, s->bucket->get_info().website_conf.error_doc, y);
+ new_err_no = RGWHandler_REST_S3Website::serve_errordoc(s, http_error_code, s->bucket->get_info().website_conf.error_doc, y);
if (new_err_no != -1) {
err_no = new_err_no;
}
/* Let's canonize the HTTP headers that are covered by the AWS auth v2. */
std::string string_to_sign;
utime_t header_time;
- if (! rgw_create_s3_canonical_header(s->info, &header_time, string_to_sign,
+ if (! rgw_create_s3_canonical_header(s, s->info, &header_time, string_to_sign,
qsr)) {
ldpp_dout(s, 10) << "failed to create the canonized auth header\n"
<< rgw::crypt_sanitize::auth{s,string_to_sign} << dendl;
auto iter = dec_output.cbegin();
decode(token, iter);
} catch (const buffer::error& e) {
- ldout(cct, 0) << "ERROR: decode SessionToken failed: " << error << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: decode SessionToken failed: " << error << dendl;
return -EINVAL;
}
}
int max_size = 4096;
std::tie(ret, data) = read_all_input(s, max_size, false);
if (ret != 0) {
- ldout(s->cct, 10) << "s3-select query: failed to retrieve query; ret = " << ret << dendl;
+ ldpp_dout(this, 10) << "s3-select query: failed to retrieve query; ret = " << ret << dendl;
return ret;
}
m_s3select_query = data.to_str();
if (m_s3select_query.length() > 0) {
- ldout(s->cct, 10) << "s3-select query: " << m_s3select_query << dendl;
+ ldpp_dout(this, 10) << "s3-select query: " << m_s3select_query << dendl;
}
else {
- ldout(s->cct, 10) << "s3-select query: failed to retrieve query;" << dendl;
+ ldpp_dout(this, 10) << "s3-select query: failed to retrieve query;" << dendl;
return -1;
}
if (s3select_syntax->get_error_description().empty() == false) {
m_result.append(s3select_syntax->get_error_description());
- ldout(s->cct, 10) << "s3-select query: failed to prase query; {" << s3select_syntax->get_error_description() << "}"<< dendl;
+ ldpp_dout(this, 10) << "s3-select query: failed to parse query; {" << s3select_syntax->get_error_description() << "}"<< dendl;
status = -1;
}
else {
extract_by_tag("QuoteEscapeCharacter", m_escape_char);
extract_by_tag("CompressionType", m_compression_type);
if (m_compression_type.length()>0 && m_compression_type.compare("NONE") != 0) {
- ldout(s->cct, 10) << "RGW supports currently only NONE option for compression type" << dendl;
+ ldpp_dout(this, 10) << "RGW currently supports only the NONE option for compression type" << dendl;
return -1;
}
for(auto& it : bl.buffers()) {
- ldout(s->cct, 10) << "processing segment " << i << " out of " << bl_len << " off " << ofs
+ ldpp_dout(this, 10) << "processing segment " << i << " out of " << bl_len << " off " << ofs
<< " len " << len << " obj-size " << s->obj_size << dendl;
if(it.length() == 0) {
- ldout(s->cct, 10) << "s3select:it->_len is zero. segment " << i << " out of " << bl_len
+ ldpp_dout(this, 10) << "s3select:it->_len is zero. segment " << i << " out of " << bl_len
<< " obj-size " << s->obj_size << dendl;
continue;
}
class RGWPutBucketTags_ObjStore_S3 : public RGWPutBucketTags_ObjStore
{
public:
- int get_params(optional_yield y) override;
+ int get_params(const DoutPrefixProvider *dpp, optional_yield y) override;
void send_response() override;
};
RGWOp *op_copy() override { return NULL; }
RGWOp *op_options() override { return NULL; }
- int serve_errordoc(int http_ret, const string &errordoc_key, optional_yield y);
+ int serve_errordoc(const DoutPrefixProvider *dpp, int http_ret, const string &errordoc_key, optional_yield y);
public:
using RGWHandler_REST_S3::RGWHandler_REST_S3;
~RGWHandler_REST_S3Website() override = default;
if (t) {
string role_session = s->info.args.get("RoleSessionName");
if (role_session.empty()) {
- ldout(s->cct, 0) << "Role Session Name is empty " << dendl;
+ ldpp_dout(dpp, 0) << "Role Session Name is empty " << dendl;
return result_t::deny(-EACCES);
}
string role_arn = s->info.args.get("RoleArn");
// If yes, then return 0, else -EPERM
auto p_res = p.eval_principal(s->env, *s->auth.identity);
if (p_res == rgw::IAM::Effect::Deny) {
- ldout(s->cct, 0) << "evaluating principal returned deny" << dendl;
+ ldpp_dout(this, 0) << "evaluating principal returned deny" << dendl;
return -EPERM;
}
auto c_res = p.eval_conditions(s->env);
if (c_res == rgw::IAM::Effect::Deny) {
- ldout(s->cct, 0) << "evaluating condition returned deny" << dendl;
+ ldpp_dout(this, 0) << "evaluating condition returned deny" << dendl;
return -EPERM;
}
} catch (rgw::IAM::PolicyParseException& e) {
- ldout(s->cct, 0) << "failed to parse policy: " << e.what() << dendl;
+ ldpp_dout(this, 0) << "failed to parse policy: " << e.what() << dendl;
return -EPERM;
}
s,
rgw::ARN(partition, service, "", s->user->get_tenant(), ""),
rgw::IAM::stsGetSessionToken)) {
- ldout(s->cct, 0) << "User does not have permssion to perform GetSessionToken" << dendl;
+ ldpp_dout(this, 0) << "User does not have permission to perform GetSessionToken" << dendl;
return -EACCES;
}
string err;
uint64_t duration_in_secs = strict_strtoll(duration.c_str(), 10, &err);
if (!err.empty()) {
- ldout(s->cct, 0) << "Invalid value of input duration: " << duration << dendl;
+ ldpp_dout(this, 0) << "Invalid value of input duration: " << duration << dendl;
return -EINVAL;
}
if (duration_in_secs < STS::GetSessionTokenRequest::getMinDuration() ||
duration_in_secs > s->cct->_conf->rgw_sts_max_session_duration) {
- ldout(s->cct, 0) << "Invalid duration in secs: " << duration_in_secs << dendl;
+ ldpp_dout(this, 0) << "Invalid duration in secs: " << duration_in_secs << dendl;
return -EINVAL;
}
}
aud = s->info.args.get("aud");
if (roleArn.empty() || roleSessionName.empty() || sub.empty() || aud.empty()) {
- ldout(s->cct, 0) << "ERROR: one of role arn or role session name or token is empty" << dendl;
+ ldpp_dout(this, 0) << "ERROR: one of role arn or role session name or token is empty" << dendl;
return -EINVAL;
}
const rgw::IAM::Policy p(s->cct, s->user->get_tenant(), bl);
}
catch (rgw::IAM::PolicyParseException& e) {
- ldout(s->cct, 20) << "failed to parse policy: " << e.what() << "policy" << policy << dendl;
+ ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << "policy" << policy << dendl;
return -ERR_MALFORMED_DOC;
}
}
tokenCode = s->info.args.get("TokenCode");
if (roleArn.empty() || roleSessionName.empty()) {
- ldout(s->cct, 0) << "ERROR: one of role arn or role session name is empty" << dendl;
+ ldpp_dout(this, 0) << "ERROR: one of role arn or role session name is empty" << dendl;
return -EINVAL;
}
const rgw::IAM::Policy p(s->cct, s->user->get_tenant(), bl);
}
catch (rgw::IAM::PolicyParseException& e) {
- ldout(s->cct, 0) << "failed to parse policy: " << e.what() << "policy" << policy << dendl;
+ ldpp_dout(this, 0) << "failed to parse policy: " << e.what() << "policy" << policy << dendl;
return -ERR_MALFORMED_DOC;
}
}
void RGWHandler_REST_STS::rgw_sts_parse_input()
{
if (post_body.size() > 0) {
- ldout(s->cct, 10) << "Content of POST: " << post_body << dendl;
+ ldpp_dout(s, 10) << "Content of POST: " << post_body << dendl;
if (post_body.find("Action") != string::npos) {
boost::char_separator<char> sep("&");
s->dialect = "sts";
if (int ret = RGWHandler_REST_STS::init_from_header(s, RGW_FORMAT_XML, true); ret < 0) {
- ldout(s->cct, 10) << "init_from_header returned err=" << ret << dendl;
+ ldpp_dout(s, 10) << "init_from_header returned err=" << ret << dendl;
return ret;
}
RGWCORSConfiguration_SWIFT *swift_cors = new RGWCORSConfiguration_SWIFT;
int r = swift_cors->create_update(allow_origins, allow_headers, expose_headers, max_age);
if (r < 0) {
- dout(0) << "Error creating/updating the cors configuration" << dendl;
+ ldpp_dout(s, 0) << "Error creating/updating the cors configuration" << dendl;
delete swift_cors;
return r;
}
next_tok(req, first, '/');
- dout(10) << "ver=" << ver << " first=" << first << " req=" << req << dendl;
+ ldpp_dout(s, 10) << "ver=" << ver << " first=" << first << " req=" << req << dendl;
if (first.size() == 0)
return 0;
op_state.max_entries = max_entries;
op_state.marker = marker;
- op_ret = RGWUserAdminOp_User::list(store, op_state, flusher);
+ op_ret = RGWUserAdminOp_User::list(this, store, op_state, flusher);
}
class RGWOp_User_Info : public RGWRESTOp {
RESTArgs::get_string(s, "placement-tags", placement_tags_str, &placement_tags_str);
if (!s->user->get_info().system && system) {
- ldout(s->cct, 0) << "cannot set system flag by non-system user" << dendl;
+ ldpp_dout(this, 0) << "cannot set system flag by non-system user" << dendl;
op_ret = -EINVAL;
return;
}
uint32_t op_mask;
int ret = rgw_parse_op_type_list(op_mask_str, &op_mask);
if (ret < 0) {
- ldout(s->cct, 0) << "failed to parse op_mask: " << ret << dendl;
+ ldpp_dout(this, 0) << "failed to parse op_mask: " << ret << dendl;
op_ret = -EINVAL;
return;
}
rgw_placement_rule target_rule;
target_rule.from_str(default_placement_str);
if (!store->get_zone()->get_params().valid_placement(target_rule)) {
- ldout(s->cct, 0) << "NOTICE: invalid dest placement: " << target_rule.to_str() << dendl;
+ ldpp_dout(this, 0) << "NOTICE: invalid dest placement: " << target_rule.to_str() << dendl;
op_ret = -EINVAL;
return;
}
}
bufferlist data;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
RESTArgs::get_string(s, "placement-tags", placement_tags_str, &placement_tags_str);
if (!s->user->get_info().system && system) {
- ldout(s->cct, 0) << "cannot set system flag by non-system user" << dendl;
+ ldpp_dout(this, 0) << "cannot set system flag by non-system user" << dendl;
op_ret = -EINVAL;
return;
}
if (!op_mask_str.empty()) {
uint32_t op_mask;
if (rgw_parse_op_type_list(op_mask_str, &op_mask) < 0) {
- ldout(s->cct, 0) << "failed to parse op_mask" << dendl;
+ ldpp_dout(this, 0) << "failed to parse op_mask" << dendl;
op_ret = -EINVAL;
return;
}
uint32_t op_mask;
int ret = rgw_parse_op_type_list(op_mask_str, &op_mask);
if (ret < 0) {
- ldout(s->cct, 0) << "failed to parse op_mask: " << ret << dendl;
+ ldpp_dout(this, 0) << "failed to parse op_mask: " << ret << dendl;
op_ret = -EINVAL;
return;
}
rgw_placement_rule target_rule;
target_rule.from_str(default_placement_str);
if (!store->get_zone()->get_params().valid_placement(target_rule)) {
- ldout(s->cct, 0) << "NOTICE: invalid dest placement: " << target_rule.to_str() << dendl;
+ ldpp_dout(this, 0) << "NOTICE: invalid dest placement: " << target_rule.to_str() << dendl;
op_ret = -EINVAL;
return;
}
}
bufferlist data;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
op_state.set_purge_data(purge_data);
bufferlist data;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
op_state.set_key_type(key_type);
bufferlist data;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
op_state.set_key_type(key_type);
bufferlist data;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
op_state.set_purge_keys();
bufferlist data;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
op_state.set_caps(caps);
bufferlist data;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
op_state.set_caps(caps);
bufferlist data;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
bool set_user = set_all || (quota_type == "user");
if (!(set_all || set_bucket || set_user)) {
- ldout(store->ctx(), 20) << "invalid quota type" << dendl;
+ ldpp_dout(this, 20) << "invalid quota type" << dendl;
op_ret = -EINVAL;
return;
}
}
if (use_http_params && set_all) {
- ldout(store->ctx(), 20) << "quota type was not specified, can't set all quotas via http headers" << dendl;
+ ldpp_dout(this, 20) << "quota type was not specified, can't set all quotas via http headers" << dendl;
op_ret = -EINVAL;
return;
}
bool RGWRestUserPolicy::validate_input()
{
if (policy_name.length() > MAX_POLICY_NAME_LEN) {
- ldout(s->cct, 0) << "ERROR: Invalid policy name length " << dendl;
+ ldpp_dout(this, 0) << "ERROR: Invalid policy name length " << dendl;
return false;
}
std::regex regex_policy_name("[A-Za-z0-9:=,.@-]+");
if (! std::regex_match(policy_name, regex_policy_name)) {
- ldout(s->cct, 0) << "ERROR: Invalid chars in policy name " << dendl;
+ ldpp_dout(this, 0) << "ERROR: Invalid chars in policy name " << dendl;
return false;
}
policy = url_decode(s->info.args.get("PolicyDocument"), true);
if (policy_name.empty() || user_name.empty() || policy.empty()) {
- ldout(s->cct, 20) << "ERROR: one of policy name, user name or policy document is empty"
+ ldpp_dout(this, 20) << "ERROR: one of policy name, user name or policy document is empty"
<< dendl;
return -EINVAL;
}
}
ceph::bufferlist in_data;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "ERROR: forward_request_to_master returned ret=" << op_ret << dendl;
return;
op_ret = -ERR_INTERNAL_ERROR;
}
} catch (rgw::IAM::PolicyParseException& e) {
- ldout(s->cct, 20) << "failed to parse policy: " << e.what() << dendl;
+ ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << dendl;
op_ret = -ERR_MALFORMED_DOC;
}
user_name = s->info.args.get("UserName");
if (policy_name.empty() || user_name.empty()) {
- ldout(s->cct, 20) << "ERROR: one of policy name or user name is empty"
+ ldpp_dout(this, 20) << "ERROR: one of policy name or user name is empty"
<< dendl;
return -EINVAL;
}
user_name = s->info.args.get("UserName");
if (user_name.empty()) {
- ldout(s->cct, 20) << "ERROR: user name is empty" << dendl;
+ ldpp_dout(this, 20) << "ERROR: user name is empty" << dendl;
return -EINVAL;
}
user_name = s->info.args.get("UserName");
if (policy_name.empty() || user_name.empty()) {
- ldout(s->cct, 20) << "ERROR: One of policy name or user name is empty"<< dendl;
+ ldpp_dout(this, 20) << "ERROR: One of policy name or user name is empty"<< dendl;
return -EINVAL;
}
}
ceph::bufferlist in_data;
- op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
// a policy might've been uploaded to this site when there was no sync
// req. in earlier releases, proceed deletion
std::unique_ptr<Bucket>* bucket,
optional_yield y) = 0;
virtual bool is_meta_master() = 0;
- virtual int forward_request_to_master(User* user, obj_version* objv,
+ virtual int forward_request_to_master(const DoutPrefixProvider *dpp, User* user, obj_version* objv,
bufferlist& in_data, JSONParser* jp, req_info& info,
optional_yield y) = 0;
virtual int defer_gc(const DoutPrefixProvider* dpp, RGWObjectCtx* rctx, Bucket* bucket, Object* obj,
const DoutPrefixProvider* dpp, optional_yield y) = 0;
virtual RGWLC* get_rgwlc(void) = 0;
virtual RGWCoroutinesManagerRegistry* get_cr_registry() = 0;
- virtual int delete_raw_obj(const rgw_raw_obj& obj) = 0;
- virtual int delete_raw_obj_aio(const rgw_raw_obj& obj, Completions* aio) = 0;
+ virtual int delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj) = 0;
+ virtual int delete_raw_obj_aio(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, Completions* aio) = 0;
virtual void get_raw_obj(const rgw_placement_rule& placement_rule, const rgw_obj& obj, rgw_raw_obj* raw_obj) = 0;
virtual int get_raw_chunk_size(const DoutPrefixProvider* dpp, const rgw_raw_obj& obj, uint64_t* chunk_size) = 0;
- virtual int log_usage(map<rgw_user_bucket, RGWUsageBatch>& usage_info) = 0;
- virtual int log_op(std::string& oid, bufferlist& bl) = 0;
+ virtual int log_usage(const DoutPrefixProvider *dpp, map<rgw_user_bucket, RGWUsageBatch>& usage_info) = 0;
+ virtual int log_op(const DoutPrefixProvider *dpp, std::string& oid, bufferlist& bl) = 0;
virtual int register_to_service_map(const std::string& daemon_type,
const map<std::string, std::string>& meta) = 0;
virtual void get_quota(RGWQuotaInfo& bucket_quota, RGWQuotaInfo& user_quota) = 0;
virtual RGWDataSyncStatusManager* get_data_sync_manager(const rgw_zone_id& source_zone) = 0;
virtual void wakeup_meta_sync_shards(set<int>& shard_ids) = 0;
virtual void wakeup_data_sync_shards(const rgw_zone_id& source_zone, map<int, set<std::string> >& shard_ids) = 0;
- virtual int clear_usage() = 0;
- virtual int read_all_usage(uint64_t start_epoch, uint64_t end_epoch,
+ virtual int clear_usage(const DoutPrefixProvider *dpp) = 0;
+ virtual int read_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch,
uint32_t max_entries, bool* is_truncated,
RGWUsageIter& usage_iter,
map<rgw_user_bucket, rgw_usage_log_entry>& usage) = 0;
- virtual int trim_all_usage(uint64_t start_epoch, uint64_t end_epoch) = 0;
+ virtual int trim_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) = 0;
virtual int get_config_key_val(std::string name, bufferlist* bl) = 0;
- virtual int meta_list_keys_init(const std::string& section, const std::string& marker, void** phandle) = 0;
+ virtual int meta_list_keys_init(const DoutPrefixProvider *dpp, const std::string& section, const std::string& marker, void** phandle) = 0;
virtual int meta_list_keys_next(void* handle, int max, list<std::string>& keys, bool* truncated) = 0;
virtual void meta_list_keys_complete(void* handle) = 0;
virtual std::string meta_get_marker(void* handle) = 0;
static bool empty(User* u) { return (!u || u->info.user_id.id.empty()); }
static bool empty(std::unique_ptr<User>& u) { return (!u || u->info.user_id.id.empty()); }
virtual int read_attrs(const DoutPrefixProvider* dpp, optional_yield y) = 0;
- virtual int read_stats(optional_yield y, RGWStorageStats* stats,
+ virtual int read_stats(const DoutPrefixProvider *dpp,
+ optional_yield y, RGWStorageStats* stats,
ceph::real_time* last_stats_sync = nullptr,
ceph::real_time* last_stats_update = nullptr) = 0;
- virtual int read_stats_async(RGWGetUserStats_CB* cb) = 0;
- virtual int complete_flush_stats(optional_yield y) = 0;
- virtual int read_usage(uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
+ virtual int read_stats_async(const DoutPrefixProvider *dpp, RGWGetUserStats_CB* cb) = 0;
+ virtual int complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y) = 0;
+ virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
bool* is_truncated, RGWUsageIter& usage_iter,
map<rgw_user_bucket, rgw_usage_log_entry>& usage) = 0;
- virtual int trim_usage(uint64_t start_epoch, uint64_t end_epoch) = 0;
+ virtual int trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) = 0;
virtual RGWObjVersionTracker& get_version_tracker() { return objv_tracker; }
virtual Attrs& get_attrs() { return attrs; }
virtual void set_attrs(Attrs& _attrs) { attrs = _attrs; }
virtual RGWAccessControlPolicy& get_acl(void) = 0;
virtual int set_acl(const DoutPrefixProvider* dpp, RGWAccessControlPolicy& acl, optional_yield y) = 0;
virtual int get_bucket_info(const DoutPrefixProvider* dpp, optional_yield y) = 0;
- virtual int get_bucket_stats(int shard_id,
+ virtual int get_bucket_stats(const DoutPrefixProvider *dpp, int shard_id,
std::string* bucket_ver, std::string* master_ver,
std::map<RGWObjCategory, RGWStorageStats>& stats,
std::string* max_marker = nullptr,
bool* syncstopped = nullptr) = 0;
- virtual int get_bucket_stats_async(int shard_id, RGWGetBucketStats_CB* ctx) = 0;
+ virtual int get_bucket_stats_async(const DoutPrefixProvider *dpp, int shard_id, RGWGetBucketStats_CB* ctx) = 0;
virtual int read_bucket_stats(const DoutPrefixProvider* dpp, optional_yield y) = 0;
- virtual int sync_user_stats(optional_yield y) = 0;
+ virtual int sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y) = 0;
virtual int update_container_stats(const DoutPrefixProvider* dpp) = 0;
virtual int check_bucket_shards(const DoutPrefixProvider* dpp) = 0;
virtual int link(const DoutPrefixProvider* dpp, User* new_user, optional_yield y, bool update_entrypoint = true, RGWObjVersionTracker* objv = nullptr) = 0;
virtual int check_quota(RGWQuotaInfo& user_quota, RGWQuotaInfo& bucket_quota, uint64_t obj_size, optional_yield y, bool check_size_only = false) = 0;
virtual int set_instance_attrs(const DoutPrefixProvider* dpp, Attrs& attrs, optional_yield y) = 0;
virtual int try_refresh_info(const DoutPrefixProvider* dpp, ceph::real_time* pmtime) = 0;
- virtual int read_usage(uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
+ virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
bool* is_truncated, RGWUsageIter& usage_iter,
map<rgw_user_bucket, rgw_usage_log_entry>& usage) = 0;
- virtual int trim_usage(uint64_t start_epoch, uint64_t end_epoch) = 0;
- virtual int remove_objs_from_index(std::list<rgw_obj_index_key>& objs_to_unlink) = 0;
- virtual int check_index(std::map<RGWObjCategory, RGWStorageStats>& existing_stats, std::map<RGWObjCategory, RGWStorageStats>& calculated_stats) = 0;
- virtual int rebuild_index() = 0;
- virtual int set_tag_timeout(uint64_t timeout) = 0;
+ virtual int trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) = 0;
+ virtual int remove_objs_from_index(const DoutPrefixProvider *dpp, std::list<rgw_obj_index_key>& objs_to_unlink) = 0;
+ virtual int check_index(const DoutPrefixProvider *dpp, std::map<RGWObjCategory, RGWStorageStats>& existing_stats, std::map<RGWObjCategory, RGWStorageStats>& calculated_stats) = 0;
+ virtual int rebuild_index(const DoutPrefixProvider *dpp) = 0;
+ virtual int set_tag_timeout(const DoutPrefixProvider *dpp, uint64_t timeout) = 0;
virtual int purge_instance(const DoutPrefixProvider* dpp) = 0;
bool empty() const { return info.bucket.name.empty(); }
virtual ~StatOp() = default;
- virtual int stat_async() = 0;
+ virtual int stat_async(const DoutPrefixProvider *dpp) = 0;
virtual int wait() = 0;
};
virtual void gen_rand_obj_instance_name() = 0;
virtual void raw_obj_to_obj(const rgw_raw_obj& raw_obj) = 0;
virtual void get_raw_obj(rgw_raw_obj* raw_obj) = 0;
- virtual MPSerializer* get_serializer(const std::string& lock_name) = 0;
+ virtual MPSerializer* get_serializer(const DoutPrefixProvider *dpp, const std::string& lock_name) = 0;
virtual int transition(RGWObjectCtx& rctx,
Bucket* bucket,
const rgw_placement_rule& placement_rule,
virtual std::unique_ptr<StatOp> get_stat_op(RGWObjectCtx*) = 0;
/* OMAP */
- virtual int omap_get_vals(const std::string& marker, uint64_t count,
+ virtual int omap_get_vals(const DoutPrefixProvider *dpp, const std::string& marker, uint64_t count,
std::map<std::string, bufferlist>* m,
bool* pmore, optional_yield y) = 0;
- virtual int omap_get_all(std::map<std::string, bufferlist>* m,
+ virtual int omap_get_all(const DoutPrefixProvider *dpp, std::map<std::string, bufferlist>* m,
optional_yield y) = 0;
- virtual int omap_get_vals_by_keys(const std::string& oid,
+ virtual int omap_get_vals_by_keys(const DoutPrefixProvider *dpp, const std::string& oid,
const std::set<std::string>& keys,
Attrs* vals) = 0;
- virtual int omap_set_val_by_key(const std::string& key, bufferlist& val,
+ virtual int omap_set_val_by_key(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& val,
bool must_exist, optional_yield y) = 0;
static bool empty(Object* o) { return (!o || o->empty()); }
Serializer() = default;
virtual ~Serializer() = default;
- virtual int try_lock(utime_t dur, optional_yield y) = 0;
+ virtual int try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y) = 0; // dpp: per-request log-prefix context, threaded down to the RADOS lock op
virtual int unlock() = 0;
};
Notification(Object* _obj, rgw::notify::EventType _type) : obj(_obj), event_type(_type) {}
virtual ~Notification() = default;
- virtual int publish_reserve(RGWObjTags* obj_tags = nullptr) = 0;
+ virtual int publish_reserve(const DoutPrefixProvider *dpp, RGWObjTags* obj_tags = nullptr) = 0; // dpp added; publish_commit below already takes one
virtual int publish_commit(const DoutPrefixProvider* dpp, uint64_t size,
const ceph::real_time& mtime, const std::string& etag) = 0;
};
GCChain(Object* _obj) : obj(_obj) {}
virtual ~GCChain() = default;
- virtual void update(RGWObjManifest* manifest) = 0;
+ virtual void update(const DoutPrefixProvider *dpp, RGWObjManifest* manifest) = 0; // dpp: log-prefix context for the update path
virtual int send(const std::string& tag) = 0;
- virtual void delete_inline(const std::string& tag) = 0;
+ virtual void delete_inline(const DoutPrefixProvider *dpp, const std::string& tag) = 0; // dpp: log-prefix context; send() left unchanged by this pass
};
using RawObjSet = std::set<rgw_raw_obj>;
return store->ctl()->user->get_attrs_by_uid(dpp, get_id(), &attrs, y, &objv_tracker);
}
-int RadosUser::read_stats(optional_yield y, RGWStorageStats* stats,
+int RadosUser::read_stats(const DoutPrefixProvider *dpp,
+ optional_yield y, RGWStorageStats* stats,
ceph::real_time* last_stats_sync,
ceph::real_time* last_stats_update)
{
- return store->ctl()->user->read_stats(get_id(), stats, y, last_stats_sync, last_stats_update);
+ return store->ctl()->user->read_stats(dpp, get_id(), stats, y, last_stats_sync, last_stats_update);
}
-int RadosUser::read_stats_async(RGWGetUserStats_CB* cb)
+int RadosUser::read_stats_async(const DoutPrefixProvider *dpp, RGWGetUserStats_CB* cb)
{
- return store->ctl()->user->read_stats_async(get_id(), cb);
+ return store->ctl()->user->read_stats_async(dpp, get_id(), cb);
}
-int RadosUser::complete_flush_stats(optional_yield y)
+int RadosUser::complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y)
{
- return store->ctl()->user->complete_flush_stats(get_id(), y);
+ return store->ctl()->user->complete_flush_stats(dpp, get_id(), y);
}
-int RadosUser::read_usage(uint64_t start_epoch, uint64_t end_epoch,
+int RadosUser::read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch,
uint32_t max_entries, bool* is_truncated,
RGWUsageIter& usage_iter,
map<rgw_user_bucket, rgw_usage_log_entry>& usage)
{
std::string bucket_name;
- return store->getRados()->read_usage(get_id(), bucket_name, start_epoch,
+ return store->getRados()->read_usage(dpp, get_id(), bucket_name, start_epoch,
end_epoch, max_entries, is_truncated,
usage_iter, usage);
}
-int RadosUser::trim_usage(uint64_t start_epoch, uint64_t end_epoch)
+int RadosUser::trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch)
{
std::string bucket_name;
- return store->getRados()->trim_usage(get_id(), bucket_name, start_epoch, end_epoch);
+ return store->getRados()->trim_usage(dpp, get_id(), bucket_name, start_epoch, end_epoch);
}
int RadosUser::load_user(const DoutPrefixProvider* dpp, optional_yield y)
}
}
- ret = store->ctl()->bucket->sync_user_stats(info.owner, info, y);
+ ret = store->ctl()->bucket->sync_user_stats(dpp, info.owner, info, y);
if (ret < 0) {
- ldout(store->ctx(), 1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl;
+ // converted to ldpp_dout for consistency with the rest of this change: dpp is in scope here
+ ldpp_dout(dpp, 1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl;
}
// they should be removed (note that any pending notifications on the bucket are still going to be sent)
RGWPubSub ps(store, info.owner.tenant);
RGWPubSub::Bucket ps_bucket(&ps, info.bucket);
- const auto ps_ret = ps_bucket.remove_notifications(y);
+ const auto ps_ret = ps_bucket.remove_notifications(dpp, y);
if (ps_ret < 0 && ps_ret != -ENOENT) {
- lderr(store->ctx()) << "ERROR: unable to remove notifications from bucket. ret=" << ps_ret << dendl;
+ // lderr is level -1; replaced with ldpp_dout(dpp, -1) to match how this change rewrites lderr elsewhere
+ ldpp_dout(dpp, -1) << "ERROR: unable to remove notifications from bucket. ret=" << ps_ret << dendl;
}
if (forward_to_master) {
bufferlist in_data;
- ret = store->forward_request_to_master(owner, &ot.read_version, in_data, nullptr, *req_info, y);
+ ret = store->forward_request_to_master(dpp, owner, &ot.read_version, in_data, nullptr, *req_info, y);
if (ret < 0) {
if (ret == -ENOENT) {
/* adjust error, we want to return with NoSuchBucket and not
return ret;
}
-int RadosBucket::get_bucket_stats(int shard_id,
+int RadosBucket::get_bucket_stats(const DoutPrefixProvider *dpp, int shard_id,
std::string* bucket_ver, std::string* master_ver,
std::map<RGWObjCategory, RGWStorageStats>& stats,
std::string* max_marker, bool* syncstopped)
{
- return store->getRados()->get_bucket_stats(info, shard_id, bucket_ver, master_ver, stats, max_marker, syncstopped);
+ return store->getRados()->get_bucket_stats(dpp, info, shard_id, bucket_ver, master_ver, stats, max_marker, syncstopped);
}
-int RadosBucket::get_bucket_stats_async(int shard_id, RGWGetBucketStats_CB* ctx)
+int RadosBucket::get_bucket_stats_async(const DoutPrefixProvider *dpp, int shard_id, RGWGetBucketStats_CB* ctx)
{
- return store->getRados()->get_bucket_stats_async(get_info(), shard_id, ctx);
+ return store->getRados()->get_bucket_stats_async(dpp, get_info(), shard_id, ctx);
}
int RadosBucket::read_bucket_stats(const DoutPrefixProvider* dpp, optional_yield y)
return ret;
}
-int RadosBucket::sync_user_stats(optional_yield y)
+int RadosBucket::sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y)
{
- return store->ctl()->bucket->sync_user_stats(owner->get_id(), info, y);
+ return store->ctl()->bucket->sync_user_stats(dpp, owner->get_id(), info, y);
}
int RadosBucket::update_container_stats(const DoutPrefixProvider* dpp)
return store->getRados()->try_refresh_bucket_info(info, pmtime, dpp, &attrs);
}
-int RadosBucket::read_usage(uint64_t start_epoch, uint64_t end_epoch,
+int RadosBucket::read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch,
uint32_t max_entries, bool* is_truncated,
RGWUsageIter& usage_iter,
map<rgw_user_bucket, rgw_usage_log_entry>& usage)
{
- return store->getRados()->read_usage(owner->get_id(), get_name(), start_epoch,
+ return store->getRados()->read_usage(dpp, owner->get_id(), get_name(), start_epoch,
end_epoch, max_entries, is_truncated,
usage_iter, usage);
}
-int RadosBucket::trim_usage(uint64_t start_epoch, uint64_t end_epoch)
+int RadosBucket::trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch)
{
- return store->getRados()->trim_usage(owner->get_id(), get_name(), start_epoch, end_epoch);
+ return store->getRados()->trim_usage(dpp, owner->get_id(), get_name(), start_epoch, end_epoch);
}
-int RadosBucket::remove_objs_from_index(std::list<rgw_obj_index_key>& objs_to_unlink)
+int RadosBucket::remove_objs_from_index(const DoutPrefixProvider *dpp, std::list<rgw_obj_index_key>& objs_to_unlink)
{
- return store->getRados()->remove_objs_from_index(info, objs_to_unlink);
+ return store->getRados()->remove_objs_from_index(dpp, info, objs_to_unlink);
}
-int RadosBucket::check_index(std::map<RGWObjCategory, RGWStorageStats>& existing_stats, std::map<RGWObjCategory, RGWStorageStats>& calculated_stats)
+int RadosBucket::check_index(const DoutPrefixProvider *dpp, std::map<RGWObjCategory, RGWStorageStats>& existing_stats, std::map<RGWObjCategory, RGWStorageStats>& calculated_stats)
{
- return store->getRados()->bucket_check_index(info, &existing_stats, &calculated_stats);
+ return store->getRados()->bucket_check_index(dpp, info, &existing_stats, &calculated_stats);
}
-int RadosBucket::rebuild_index()
+int RadosBucket::rebuild_index(const DoutPrefixProvider *dpp)
{
- return store->getRados()->bucket_rebuild_index(info);
+ return store->getRados()->bucket_rebuild_index(dpp, info);
}
-int RadosBucket::set_tag_timeout(uint64_t timeout)
+int RadosBucket::set_tag_timeout(const DoutPrefixProvider *dpp, uint64_t timeout)
{
- return store->getRados()->cls_obj_set_bucket_tag_timeout(info, timeout);
+ return store->getRados()->cls_obj_set_bucket_tag_timeout(dpp, info, timeout);
}
int RadosBucket::purge_instance(const DoutPrefixProvider* dpp)
if (!svc()->zone->is_meta_master()) {
JSONParser jp;
- ret = forward_request_to_master(&u, NULL, in_data, &jp, req_info, y);
+ ret = forward_request_to_master(dpp, &u, NULL, in_data, &jp, req_info, y);
if (ret < 0) {
return ret;
}
if (*existed) {
rgw_placement_rule selected_placement_rule;
- ret = svc()->zone->select_bucket_placement(u.get_info(),
+ ret = svc()->zone->select_bucket_placement(dpp, u.get_info(),
zid, placement_rule,
&selected_placement_rule, nullptr, y);
if (selected_placement_rule != info.placement_rule) {
return svc()->zone->is_meta_master();
}
-int RadosStore::forward_request_to_master(User* user, obj_version* objv,
+int RadosStore::forward_request_to_master(const DoutPrefixProvider *dpp, User* user, obj_version* objv,
bufferlist& in_data,
JSONParser* jp, req_info& info,
optional_yield y)
}
if (!svc()->zone->get_master_conn()) {
- ldout(ctx(), 0) << "rest connection is invalid" << dendl;
+ ldpp_dout(dpp, 0) << "rest connection is invalid" << dendl;
return -EINVAL;
}
- ldout(ctx(), 0) << "sending request to master zonegroup" << dendl;
+ ldpp_dout(dpp, 0) << "sending request to master zonegroup" << dendl;
bufferlist response;
std::string uid_str = user->get_id().to_str();
#define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
- int ret = svc()->zone->get_master_conn()->forward(rgw_user(uid_str), info,
+ int ret = svc()->zone->get_master_conn()->forward(dpp, rgw_user(uid_str), info,
objv, MAX_REST_RESPONSE,
&in_data, &response, y);
if (ret < 0)
return ret;
- ldout(ctx(), 20) << "response: " << response.c_str() << dendl;
+ ldpp_dout(dpp, 20) << "response: " << response.c_str() << dendl;
if (jp && !jp->parse(response.c_str(), response.length())) {
- ldout(ctx(), 0) << "failed parsing response from master zonegroup" << dendl;
+ ldpp_dout(dpp, 0) << "failed parsing response from master zonegroup" << dendl;
return -EINVAL;
}
struct req_state* s,
rgw::notify::EventType event_type)
{
- return std::unique_ptr<Notification>(new RadosNotification(this, obj, s, event_type));
+ return std::unique_ptr<Notification>(new RadosNotification(s, this, obj, s, event_type)); // 's' intentionally passed twice: first as the new DoutPrefixProvider arg, then as the req_state
}
std::unique_ptr<GCChain> RadosStore::get_gc_chain(rgw::sal::Object* obj)
return std::unique_ptr<Writer>(new RadosWriter(aio, this, bucket, obj_ctx, std::move(_head_obj), dpp, y));
}
-int RadosStore::delete_raw_obj(const rgw_raw_obj& obj)
+int RadosStore::delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj)
{
- return rados->delete_raw_obj(obj);
+ return rados->delete_raw_obj(dpp, obj);
}
-int RadosStore::delete_raw_obj_aio(const rgw_raw_obj& obj, Completions* aio)
+int RadosStore::delete_raw_obj_aio(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, Completions* aio)
{
RadosCompletions* raio = static_cast<RadosCompletions*>(aio);
- return rados->delete_raw_obj_aio(obj, raio->handles);
+ return rados->delete_raw_obj_aio(dpp, obj, raio->handles);
}
void RadosStore::get_raw_obj(const rgw_placement_rule& placement_rule, const rgw_obj& obj, rgw_raw_obj* raw_obj)
return rados->get_max_chunk_size(obj.pool, chunk_size, dpp);
}
-int RadosStore::log_usage(map<rgw_user_bucket, RGWUsageBatch>& usage_info)
+int RadosStore::log_usage(const DoutPrefixProvider *dpp, map<rgw_user_bucket, RGWUsageBatch>& usage_info)
{
- return rados->log_usage(usage_info);
+ return rados->log_usage(dpp, usage_info);
}
-int RadosStore::log_op(std::string& oid, bufferlist& bl)
+int RadosStore::log_op(const DoutPrefixProvider *dpp, std::string& oid, bufferlist& bl)
{
rgw_raw_obj obj(svc()->zone->get_zone_params().log_pool, oid);
- int ret = rados->append_async(obj, bl.length(), bl);
+ int ret = rados->append_async(dpp, obj, bl.length(), bl);
if (ret == -ENOENT) {
- ret = rados->create_pool(svc()->zone->get_zone_params().log_pool);
+ ret = rados->create_pool(dpp, svc()->zone->get_zone_params().log_pool);
if (ret < 0)
return ret;
// retry
- ret = rados->append_async(obj, bl.length(), bl);
+ ret = rados->append_async(dpp, obj, bl.length(), bl);
}
return ret;
return rados->get_data_sync_manager(source_zone);
}
-int RadosStore::read_all_usage(uint64_t start_epoch, uint64_t end_epoch,
+int RadosStore::read_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch,
uint32_t max_entries, bool* is_truncated,
RGWUsageIter& usage_iter,
map<rgw_user_bucket, rgw_usage_log_entry>& usage)
rgw_user uid;
std::string bucket_name;
- return rados->read_usage(uid, bucket_name, start_epoch, end_epoch, max_entries,
+ return rados->read_usage(dpp, uid, bucket_name, start_epoch, end_epoch, max_entries,
is_truncated, usage_iter, usage);
}
-int RadosStore::trim_all_usage(uint64_t start_epoch, uint64_t end_epoch)
+int RadosStore::trim_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch)
{
rgw_user uid;
std::string bucket_name;
- return rados->trim_usage(uid, bucket_name, start_epoch, end_epoch);
+ return rados->trim_usage(dpp, uid, bucket_name, start_epoch, end_epoch);
}
int RadosStore::get_config_key_val(std::string name, bufferlist* bl)
return svc()->config_key->get(name, true, bl);
}
-int RadosStore::meta_list_keys_init(const std::string& section, const std::string& marker, void** phandle)
+int RadosStore::meta_list_keys_init(const DoutPrefixProvider *dpp, const std::string& section, const std::string& marker, void** phandle)
{
- return ctl()->meta.mgr->list_keys_init(section, marker, phandle);
+ return ctl()->meta.mgr->list_keys_init(dpp, section, marker, phandle);
}
int RadosStore::meta_list_keys_next(void* handle, int max, list<std::string>& keys, bool* truncated)
RGWListRawObjsCtx ctx;
do {
list<std::string> oids;
- int r = rados->list_raw_objects(pool, prefix, 1000, ctx, oids, &is_truncated);
+ int r = rados->list_raw_objects(dpp, pool, prefix, 1000, ctx, oids, &is_truncated);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: listing filtered objects failed: "
<< prefix << ": " << cpp_strerror(-r) << dendl;
RGWListRawObjsCtx ctx;
do {
list<std::string> oids;
- int r = rados->list_raw_objects(pool, prefix, 1000, ctx, oids, &is_truncated);
+ int r = rados->list_raw_objects(dpp, pool, prefix, 1000, ctx, oids, &is_truncated);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: listing filtered objects failed: OIDC pool: "
<< pool.name << ": " << prefix << ": " << cpp_strerror(-r) << dendl;
return 0;
}
-int RadosStore::get_obj_head_ioctx(const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::IoCtx* ioctx)
+int RadosStore::get_obj_head_ioctx(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::IoCtx* ioctx)
{
- return rados->get_obj_head_ioctx(bucket_info, obj, ioctx);
+ return rados->get_obj_head_ioctx(dpp, bucket_info, obj, ioctx);
}
int Object::range_to_ofs(uint64_t obj_size, int64_t &ofs, int64_t &end)
store->getRados()->obj_to_raw((bucket->get_info()).placement_rule, get_obj(), raw_obj);
}
-int RadosObject::omap_get_vals(const std::string& marker, uint64_t count,
+int RadosObject::omap_get_vals(const DoutPrefixProvider *dpp, const std::string& marker, uint64_t count,
std::map<std::string, bufferlist> *m,
bool* pmore, optional_yield y)
{
get_raw_obj(&raw_obj);
auto sysobj = obj_ctx.get_obj(raw_obj);
- return sysobj.omap().get_vals(marker, count, m, pmore, y);
+ return sysobj.omap().get_vals(dpp, marker, count, m, pmore, y);
}
-int RadosObject::omap_get_all(std::map<std::string, bufferlist> *m,
+int RadosObject::omap_get_all(const DoutPrefixProvider *dpp, std::map<std::string, bufferlist> *m,
optional_yield y)
{
auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
get_raw_obj(&raw_obj);
auto sysobj = obj_ctx.get_obj(raw_obj);
- return sysobj.omap().get_all(m, y);
+ return sysobj.omap().get_all(dpp, m, y);
}
-int RadosObject::omap_get_vals_by_keys(const std::string& oid,
+int RadosObject::omap_get_vals_by_keys(const DoutPrefixProvider *dpp, const std::string& oid,
const std::set<std::string>& keys,
Attrs* vals)
{
rgw_obj obj = get_obj();
store->getRados()->obj_to_raw(bucket->get_placement_rule(), obj, &head_obj);
- ret = store->get_obj_head_ioctx(bucket->get_info(), obj, &cur_ioctx);
+ ret = store->get_obj_head_ioctx(dpp, bucket->get_info(), obj, &cur_ioctx);
if (ret < 0) {
return ret;
}
return cur_ioctx.omap_get_vals_by_keys(oid, keys, vals);
}
-int RadosObject::omap_set_val_by_key(const std::string& key, bufferlist& val,
+int RadosObject::omap_set_val_by_key(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& val,
bool must_exist, optional_yield y)
{
rgw_raw_obj raw_meta_obj;
auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
auto sysobj = obj_ctx.get_obj(raw_meta_obj);
- return sysobj.omap().set_must_exist(must_exist).set(key, val, y);
+ return sysobj.omap().set_must_exist(must_exist).set(dpp, key, val, y);
}
-MPSerializer* RadosObject::get_serializer(const std::string& lock_name)
+MPSerializer* RadosObject::get_serializer(const DoutPrefixProvider *dpp, const std::string& lock_name)
{
- return new MPRadosSerializer(store, this, lock_name);
+ return new MPRadosSerializer(dpp, store, this, lock_name);
}
int RadosObject::transition(RGWObjectCtx& rctx,
parent_op(&op_target)
{ }
-int RadosObject::RadosStatOp::stat_async()
+int RadosObject::RadosStatOp::stat_async(const DoutPrefixProvider *dpp)
{
- return parent_op.stat_async();
+ return parent_op.stat_async(dpp);
}
int RadosObject::RadosStatOp::wait()
y);
}
-MPRadosSerializer::MPRadosSerializer(RadosStore* store, RadosObject* obj, const std::string& lock_name) :
+MPRadosSerializer::MPRadosSerializer(const DoutPrefixProvider *dpp, RadosStore* store, RadosObject* obj, const std::string& lock_name) :
lock(lock_name)
{
rgw_pool meta_pool;
oid = raw_obj.oid;
store->getRados()->get_obj_data_pool(obj->get_bucket()->get_placement_rule(),
obj->get_obj(), &meta_pool);
- store->getRados()->open_pool_ctx(meta_pool, ioctx, true);
+ store->getRados()->open_pool_ctx(dpp, meta_pool, ioctx, true);
}
-int MPRadosSerializer::try_lock(utime_t dur, optional_yield y)
+int MPRadosSerializer::try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y)
{
op.assert_exists();
lock.set_duration(dur);
lock.lock_exclusive(&op);
- int ret = rgw_rados_operate(ioctx, oid, &op, y);
+ int ret = rgw_rados_operate(dpp, ioctx, oid, &op, y);
if (! ret) {
locked = true;
}
lock.set_cookie(cookie);
}
-int LCRadosSerializer::try_lock(utime_t dur, optional_yield y)
+int LCRadosSerializer::try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y)
{
lock.set_duration(dur);
return lock.lock_exclusive(ioctx, oid);
return new LCRadosSerializer(store, oid, lock_name, cookie);
}
-int RadosNotification::publish_reserve(RGWObjTags* obj_tags)
+int RadosNotification::publish_reserve(const DoutPrefixProvider *dpp, RGWObjTags* obj_tags)
{
- return rgw::notify::publish_reserve(event_type, res, obj_tags);
+ return rgw::notify::publish_reserve(dpp, event_type, res, obj_tags);
}
int RadosNotification::publish_commit(const DoutPrefixProvider* dpp, uint64_t size,
return rgw::notify::publish_commit(obj, size, mtime, etag, event_type, res, dpp);
}
-void RadosGCChain::update(RGWObjManifest* manifest)
+void RadosGCChain::update(const DoutPrefixProvider *dpp, RGWObjManifest* manifest)
{
rgw_obj target = obj->get_obj();
- store->getRados()->update_gc_chain(target, *manifest, &chain);
+ store->getRados()->update_gc_chain(dpp, target, *manifest, &chain);
}
int RadosGCChain::send(const std::string& tag)
return store->getRados()->send_chain_to_gc(chain, tag);
}
-void RadosGCChain::delete_inline(const std::string& tag)
+void RadosGCChain::delete_inline(const DoutPrefixProvider *dpp, const std::string& tag)
{
- store->getRados()->delete_objs_inline(chain, tag);
+ store->getRados()->delete_objs_inline(dpp, chain, tag);
}
int RadosWriter::set_stripe_obj(const rgw_raw_obj& raw_obj)
{
stripe_obj = store->svc()->rados->obj(raw_obj);
- return stripe_obj.open();
+ return stripe_obj.open(dpp);
}
int RadosWriter::process(bufferlist&& bl, uint64_t offset)
continue;
}
- int r = store->delete_raw_obj(obj);
+ int r = store->delete_raw_obj(dpp, obj);
if (r < 0 && r != -ENOENT) {
ldpp_dout(dpp, 0) << "WARNING: failed to remove obj (" << obj << "), leaked" << dendl;
}
bufferlist bl;
ceph::encode(script, bl);
- int r = rgw_put_system_obj(obj_ctx, pool, key, bl, false, nullptr, real_time(), y);
+ int r = rgw_put_system_obj(dpp, obj_ctx, pool, key, bl, false, nullptr, real_time(), y);
if (r < 0) {
return r;
}
int RadosLuaScriptManager::del(const DoutPrefixProvider* dpp, optional_yield y, const std::string& key)
{
- int r = rgw_delete_system_obj(store->svc()->sysobj, pool, key, nullptr, y);
+ int r = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, key, nullptr, y);
if (r < 0 && r != -ENOENT) {
return r;
}
bufferlist bl;
using ceph::encode;
encode(*this, bl);
- return rgw_put_system_obj(obj_ctx, store->get_zone()->get_params().oidc_pool, oid, bl, exclusive, nullptr, real_time(), y);
+ return rgw_put_system_obj(dpp, obj_ctx, store->get_zone()->get_params().oidc_pool, oid, bl, exclusive, nullptr, real_time(), y);
}
int RadosOIDCProvider::read_url(const DoutPrefixProvider *dpp, const std::string& url, const std::string& tenant)
// Delete url
std::string oid = tenant + get_url_oid_prefix() + url;
- ret = rgw_delete_system_obj(store->svc()->sysobj, pool, oid, nullptr, y);
+ ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: deleting oidc url from pool: " << pool.name << ": "
<< provider_url << ": " << cpp_strerror(-ret) << dendl;
bufferlist bl;
encode(*this, bl);
- return rgw_put_system_obj(obj_ctx, store->get_zone()->get_params().roles_pool, oid, bl, exclusive, nullptr, real_time(), y);
+ return rgw_put_system_obj(dpp, obj_ctx, store->get_zone()->get_params().roles_pool, oid, bl, exclusive, nullptr, real_time(), y);
}
int RadosRole::store_name(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
using ceph::encode;
encode(nameToId, bl);
- return rgw_put_system_obj(obj_ctx, store->get_zone()->get_params().roles_pool, oid, bl, exclusive, nullptr, real_time(), y);
+ return rgw_put_system_obj(dpp, obj_ctx, store->get_zone()->get_params().roles_pool, oid, bl, exclusive, nullptr, real_time(), y);
}
int RadosRole::store_path(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
bufferlist bl;
- return rgw_put_system_obj(obj_ctx, store->get_zone()->get_params().roles_pool, oid, bl, exclusive, nullptr, real_time(), y);
+ return rgw_put_system_obj(dpp, obj_ctx, store->get_zone()->get_params().roles_pool, oid, bl, exclusive, nullptr, real_time(), y);
}
int RadosRole::read_id(const DoutPrefixProvider *dpp, const std::string& role_name, const std::string& tenant, std::string& role_id, optional_yield y)
//Delete the role info that was stored in the previous call
std::string oid = get_info_oid_prefix() + id;
- int info_ret = rgw_delete_system_obj(store->svc()->sysobj, pool, oid, nullptr, y);
+ int info_ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
if (info_ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: cleanup of role id from Role pool: "
<< id << ": " << cpp_strerror(-info_ret) << dendl;
<< path << ": " << cpp_strerror(-ret) << dendl;
//Delete the role info that was stored in the previous call
std::string oid = get_info_oid_prefix() + id;
- int info_ret = rgw_delete_system_obj(store->svc()->sysobj, pool, oid, nullptr, y);
+ int info_ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
if (info_ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: cleanup of role id from Role pool: "
<< id << ": " << cpp_strerror(-info_ret) << dendl;
}
//Delete role name that was stored in previous call
oid = tenant + get_names_oid_prefix() + name;
- int name_ret = rgw_delete_system_obj(store->svc()->sysobj, pool, oid, nullptr, y);
+ int name_ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
if (name_ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: cleanup of role name from Role pool: "
<< name << ": " << cpp_strerror(-name_ret) << dendl;
// Delete id
std::string oid = get_info_oid_prefix() + id;
- ret = rgw_delete_system_obj(store->svc()->sysobj, pool, oid, nullptr, y);
+ ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: deleting role id from Role pool: "
<< id << ": " << cpp_strerror(-ret) << dendl;
// Delete name
oid = tenant + get_names_oid_prefix() + name;
- ret = rgw_delete_system_obj(store->svc()->sysobj, pool, oid, nullptr, y);
+ ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: deleting role name from Role pool: "
<< name << ": " << cpp_strerror(-ret) << dendl;
// Delete path
oid = tenant + get_path_oid_prefix() + path + get_info_oid_prefix() + id;
- ret = rgw_delete_system_obj(store->svc()->sysobj, pool, oid, nullptr, y);
+ ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: deleting role path from Role pool: "
<< path << ": " << cpp_strerror(-ret) << dendl;
optional_yield y) override;
virtual Bucket* create_bucket(rgw_bucket& bucket, ceph::real_time creation_time) override;
virtual int read_attrs(const DoutPrefixProvider* dpp, optional_yield y) override;
- virtual int read_stats(optional_yield y, RGWStorageStats* stats,
+ virtual int read_stats(const DoutPrefixProvider *dpp,
+ optional_yield y, RGWStorageStats* stats,
ceph::real_time* last_stats_sync = nullptr,
ceph::real_time* last_stats_update = nullptr) override;
- virtual int read_stats_async(RGWGetUserStats_CB* cb) override;
- virtual int complete_flush_stats(optional_yield y) override;
- virtual int read_usage(uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
+ virtual int read_stats_async(const DoutPrefixProvider *dpp, RGWGetUserStats_CB* cb) override;
+ virtual int complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y) override;
+ virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
bool* is_truncated, RGWUsageIter& usage_iter,
map<rgw_user_bucket, rgw_usage_log_entry>& usage) override;
- virtual int trim_usage(uint64_t start_epoch, uint64_t end_epoch) override;
+ virtual int trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) override;
virtual int load_user(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int store_user(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, RGWUserInfo* old_info = nullptr) override;
public:
RadosStatOp(RadosObject* _source, RGWObjectCtx* _rctx);
- virtual int stat_async() override;
+ virtual int stat_async(const DoutPrefixProvider *dpp) override;
virtual int wait() override;
};
virtual std::unique_ptr<Object> clone() override {
return std::unique_ptr<Object>(new RadosObject(*this));
}
- virtual MPSerializer* get_serializer(const std::string& lock_name) override;
+ virtual MPSerializer* get_serializer(const DoutPrefixProvider *dpp, const std::string& lock_name) override;
virtual int transition(RGWObjectCtx& rctx,
Bucket* bucket,
const rgw_placement_rule& placement_rule,
virtual std::unique_ptr<StatOp> get_stat_op(RGWObjectCtx*) override;
/* OMAP */
- virtual int omap_get_vals(const std::string& marker, uint64_t count,
+ virtual int omap_get_vals(const DoutPrefixProvider *dpp, const std::string& marker, uint64_t count,
std::map<std::string, bufferlist> *m,
bool* pmore, optional_yield y) override;
- virtual int omap_get_all(std::map<std::string, bufferlist> *m,
+ virtual int omap_get_all(const DoutPrefixProvider *dpp, std::map<std::string, bufferlist> *m,
optional_yield y) override;
- virtual int omap_get_vals_by_keys(const std::string& oid,
+ virtual int omap_get_vals_by_keys(const DoutPrefixProvider *dpp, const std::string& oid,
const std::set<std::string>& keys,
Attrs* vals) override;
- virtual int omap_set_val_by_key(const std::string& key, bufferlist& val,
+ virtual int omap_set_val_by_key(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& val,
bool must_exist, optional_yield y) override;
private:
virtual RGWAccessControlPolicy& get_acl(void) override { return acls; }
virtual int set_acl(const DoutPrefixProvider* dpp, RGWAccessControlPolicy& acl, optional_yield y) override;
virtual int get_bucket_info(const DoutPrefixProvider* dpp, optional_yield y) override;
- virtual int get_bucket_stats(int shard_id,
+ virtual int get_bucket_stats(const DoutPrefixProvider *dpp, int shard_id,
std::string* bucket_ver, std::string* master_ver,
std::map<RGWObjCategory, RGWStorageStats>& stats,
std::string* max_marker = nullptr,
bool* syncstopped = nullptr) override;
- virtual int get_bucket_stats_async(int shard_id, RGWGetBucketStats_CB* ctx) override;
+ virtual int get_bucket_stats_async(const DoutPrefixProvider *dpp, int shard_id, RGWGetBucketStats_CB* ctx) override;
virtual int read_bucket_stats(const DoutPrefixProvider* dpp, optional_yield y) override;
- virtual int sync_user_stats(optional_yield y) override;
+ virtual int sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y) override;
virtual int update_container_stats(const DoutPrefixProvider* dpp) override;
virtual int check_bucket_shards(const DoutPrefixProvider* dpp) override;
virtual int link(const DoutPrefixProvider* dpp, User* new_user, optional_yield y, bool update_entrypoint, RGWObjVersionTracker* objv) override;
virtual int check_quota(RGWQuotaInfo& user_quota, RGWQuotaInfo& bucket_quota, uint64_t obj_size, optional_yield y, bool check_size_only = false) override;
virtual int set_instance_attrs(const DoutPrefixProvider* dpp, Attrs& attrs, optional_yield y) override;
virtual int try_refresh_info(const DoutPrefixProvider* dpp, ceph::real_time* pmtime) override;
- virtual int read_usage(uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
+ virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
bool* is_truncated, RGWUsageIter& usage_iter,
map<rgw_user_bucket, rgw_usage_log_entry>& usage) override;
- virtual int trim_usage(uint64_t start_epoch, uint64_t end_epoch) override;
- virtual int remove_objs_from_index(std::list<rgw_obj_index_key>& objs_to_unlink) override;
- virtual int check_index(std::map<RGWObjCategory, RGWStorageStats>& existing_stats, std::map<RGWObjCategory, RGWStorageStats>& calculated_stats) override;
- virtual int rebuild_index() override;
- virtual int set_tag_timeout(uint64_t timeout) override;
+ virtual int trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) override;
+ virtual int remove_objs_from_index(const DoutPrefixProvider *dpp, std::list<rgw_obj_index_key>& objs_to_unlink) override;
+ virtual int check_index(const DoutPrefixProvider *dpp, std::map<RGWObjCategory, RGWStorageStats>& existing_stats, std::map<RGWObjCategory, RGWStorageStats>& calculated_stats) override;
+ virtual int rebuild_index(const DoutPrefixProvider *dpp) override;
+ virtual int set_tag_timeout(const DoutPrefixProvider *dpp, uint64_t timeout) override;
virtual int purge_instance(const DoutPrefixProvider* dpp) override;
virtual std::unique_ptr<Bucket> clone() override {
return std::make_unique<RadosBucket>(*this);
std::unique_ptr<Bucket>* bucket,
optional_yield y) override;
virtual bool is_meta_master() override;
- virtual int forward_request_to_master(User* user, obj_version* objv,
+ virtual int forward_request_to_master(const DoutPrefixProvider *dpp, User* user, obj_version* objv,
bufferlist& in_data, JSONParser* jp, req_info& info,
optional_yield y) override;
virtual int defer_gc(const DoutPrefixProvider* dpp, RGWObjectCtx* rctx, Bucket* bucket, Object* obj,
const DoutPrefixProvider* dpp, optional_yield y) override;
virtual RGWLC* get_rgwlc(void) override { return rados->get_lc(); }
virtual RGWCoroutinesManagerRegistry* get_cr_registry() override { return rados->get_cr_registry(); }
- virtual int delete_raw_obj(const rgw_raw_obj& obj) override;
- virtual int delete_raw_obj_aio(const rgw_raw_obj& obj, Completions* aio) override;
+ virtual int delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj) override;
+ virtual int delete_raw_obj_aio(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, Completions* aio) override;
virtual void get_raw_obj(const rgw_placement_rule& placement_rule, const rgw_obj& obj, rgw_raw_obj* raw_obj) override;
virtual int get_raw_chunk_size(const DoutPrefixProvider* dpp, const rgw_raw_obj& obj, uint64_t* chunk_size) override;
- virtual int log_usage(map<rgw_user_bucket, RGWUsageBatch>& usage_info) override;
- virtual int log_op(std::string& oid, bufferlist& bl) override;
+ virtual int log_usage(const DoutPrefixProvider *dpp, map<rgw_user_bucket, RGWUsageBatch>& usage_info) override;
+ virtual int log_op(const DoutPrefixProvider *dpp, std::string& oid, bufferlist& bl) override;
virtual int register_to_service_map(const std::string& daemon_type,
const map<std::string, std::string>& meta) override;
virtual void get_quota(RGWQuotaInfo& bucket_quota, RGWQuotaInfo& user_quota) override;
virtual RGWDataSyncStatusManager* get_data_sync_manager(const rgw_zone_id& source_zone) override;
virtual void wakeup_meta_sync_shards(set<int>& shard_ids) override { rados->wakeup_meta_sync_shards(shard_ids); }
virtual void wakeup_data_sync_shards(const rgw_zone_id& source_zone, map<int, set<std::string> >& shard_ids) override { rados->wakeup_data_sync_shards(source_zone, shard_ids); }
- virtual int clear_usage() override { return rados->clear_usage(); }
- virtual int read_all_usage(uint64_t start_epoch, uint64_t end_epoch,
+ virtual int clear_usage(const DoutPrefixProvider *dpp) override { return rados->clear_usage(dpp); }
+ virtual int read_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch,
uint32_t max_entries, bool* is_truncated,
RGWUsageIter& usage_iter,
map<rgw_user_bucket, rgw_usage_log_entry>& usage) override;
- virtual int trim_all_usage(uint64_t start_epoch, uint64_t end_epoch) override;
+ virtual int trim_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) override;
virtual int get_config_key_val(std::string name, bufferlist* bl) override;
- virtual int meta_list_keys_init(const std::string& section, const std::string& marker, void** phandle) override;
+ virtual int meta_list_keys_init(const DoutPrefixProvider *dpp, const std::string& section, const std::string& marker, void** phandle) override;
virtual int meta_list_keys_next(void* handle, int max, list<std::string>& keys, bool* truncated) override;
virtual void meta_list_keys_complete(void* handle) override;
virtual std::string meta_get_marker(void* handle) override;
}
/* Unique to RadosStore */
- int get_obj_head_ioctx(const RGWBucketInfo& bucket_info, const rgw_obj& obj,
+ int get_obj_head_ioctx(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj,
librados::IoCtx* ioctx);
void setRados(RGWRados * st) { rados = st; }
librados::ObjectWriteOperation op;
public:
- MPRadosSerializer(RadosStore* store, RadosObject* obj, const std::string& lock_name);
+ MPRadosSerializer(const DoutPrefixProvider *dpp, RadosStore* store, RadosObject* obj, const std::string& lock_name);
- virtual int try_lock(utime_t dur, optional_yield y) override;
+ virtual int try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y) override;
virtual int unlock() override {
return lock.unlock(&ioctx, oid);
}
public:
LCRadosSerializer(RadosStore* store, const std::string& oid, const std::string& lock_name, const std::string& cookie);
- virtual int try_lock(utime_t dur, optional_yield y) override;
+ virtual int try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y) override;
virtual int unlock() override {
return lock.unlock(ioctx, oid);
}
rgw::notify::reservation_t res;
public:
- RadosNotification(RadosStore* _store, Object* _obj, req_state* _s, rgw::notify::EventType _type) : Notification(_obj, _type), store(_store), res(_store, _s, _obj) { }
+ RadosNotification(const DoutPrefixProvider *_dpp, RadosStore* _store, Object* _obj, req_state* _s, rgw::notify::EventType _type) : Notification(_obj, _type), store(_store), res(_dpp, _store, _s, _obj) { }
~RadosNotification() = default;
- virtual int publish_reserve(RGWObjTags* obj_tags = nullptr) override;
+ virtual int publish_reserve(const DoutPrefixProvider *dpp, RGWObjTags* obj_tags = nullptr) override;
virtual int publish_commit(const DoutPrefixProvider* dpp, uint64_t size,
const ceph::real_time& mtime, const std::string& etag) override;
};
RadosGCChain(RadosStore* _store, Object* _obj) : GCChain(_obj), store(_store) {}
~RadosGCChain() = default;
- virtual void update(RGWObjManifest* manifest) override;
+ virtual void update(const DoutPrefixProvider *dpp, RGWObjManifest* manifest) override;
virtual int send(const std::string& tag) override;
- virtual void delete_inline(const std::string& tag) override;
+ virtual void delete_inline(const DoutPrefixProvider *dpp, const std::string& tag) override;
};
class RadosWriter : public Writer {
user_rados = std::make_unique<RGWSI_User_RADOS>(cct);
if (have_cache) {
- sysobj_cache = std::make_unique<RGWSI_SysObj_Cache>(cct);
+ sysobj_cache = std::make_unique<RGWSI_SysObj_Cache>(dpp, cct);
}
vector<RGWSI_MetaBackend *> meta_bes{meta_be_sobj.get(), meta_be_otp.get()};
return r;
}
- r = datalog_rados->start(&zone->get_zone(),
+ r = datalog_rados->start(dpp, &zone->get_zone(),
zone->get_zone_params(),
rados->get_rados_handle());
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start datalog_rados service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start datalog_rados service (" << cpp_strerror(-r) << dendl;
return r;
}
return string(buf);
}
-RGWCoroutine *RGWSyncErrorLogger::log_error_cr(const string& source_zone, const string& section, const string& name, uint32_t error_code, const string& message) {
+RGWCoroutine *RGWSyncErrorLogger::log_error_cr(const DoutPrefixProvider *dpp, const string& source_zone, const string& section, const string& name, uint32_t error_code, const string& message) {
cls_log_entry entry;
rgw_sync_error_info info(source_zone, error_code, message);
uint32_t shard_id = ++counter % num_shards;
- return new RGWRadosTimelogAddCR(store, oids[shard_id], entry);
+ return new RGWRadosTimelogAddCR(dpp, store, oids[shard_id], entry);
}
void RGWSyncBackoff::update_wait_time()
op->wait(utime_t(cur_wait, 0));
}
-int RGWBackoffControlCR::operate() {
+int RGWBackoffControlCR::operate(const DoutPrefixProvider *dpp) {
reenter(this) {
// retry the operation until it succeeds
while (true) {
JSONDecoder::decode_json("entries", entries, obj);
};
-int RGWShardCollectCR::operate() {
+int RGWShardCollectCR::operate(const DoutPrefixProvider *dpp) {
reenter(this) {
while (spawn_next()) {
current_running++;
delete error_logger;
}
-int RGWRemoteMetaLog::read_log_info(rgw_mdlog_info *log_info)
+int RGWRemoteMetaLog::read_log_info(const DoutPrefixProvider *dpp, rgw_mdlog_info *log_info)
{
rgw_http_param_pair pairs[] = { { "type", "metadata" },
{ NULL, NULL } };
- int ret = conn->get_json_resource("/admin/log", pairs, null_yield, *log_info);
+ int ret = conn->get_json_resource(dpp, "/admin/log", pairs, null_yield, *log_info);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to fetch mdlog info" << dendl;
return ret;
return 0;
}
-int RGWRemoteMetaLog::read_master_log_shards_info(const string &master_period, map<int, RGWMetadataLogInfo> *shards_info)
+int RGWRemoteMetaLog::read_master_log_shards_info(const DoutPrefixProvider *dpp, const string &master_period, map<int, RGWMetadataLogInfo> *shards_info)
{
if (store->svc()->zone->is_meta_master()) {
return 0;
}
rgw_mdlog_info log_info;
- int ret = read_log_info(&log_info);
+ int ret = read_log_info(dpp, &log_info);
if (ret < 0) {
return ret;
}
- return run(new RGWReadRemoteMDLogInfoCR(&sync_env, master_period, log_info.num_shards, shards_info));
+ return run(dpp, new RGWReadRemoteMDLogInfoCR(&sync_env, master_period, log_info.num_shards, shards_info));
}
-int RGWRemoteMetaLog::read_master_log_shards_next(const string& period, map<int, string> shard_markers, map<int, rgw_mdlog_shard_data> *result)
+int RGWRemoteMetaLog::read_master_log_shards_next(const DoutPrefixProvider *dpp, const string& period, map<int, string> shard_markers, map<int, rgw_mdlog_shard_data> *result)
{
if (store->svc()->zone->is_meta_master()) {
return 0;
}
- return run(new RGWListRemoteMDLogCR(&sync_env, period, shard_markers, 1, result));
+ return run(dpp, new RGWListRemoteMDLogCR(&sync_env, period, shard_markers, 1, result));
}
int RGWRemoteMetaLog::init()
#define CLONE_MAX_ENTRIES 100
-int RGWMetaSyncStatusManager::init()
+int RGWMetaSyncStatusManager::init(const DoutPrefixProvider *dpp)
{
if (store->svc()->zone->is_meta_master()) {
return 0;
}
if (!store->svc()->zone->get_master_conn()) {
- lderr(store->ctx()) << "no REST connection to master zone" << dendl;
+ ldpp_dout(dpp, -1) << "no REST connection to master zone" << dendl;
return -EIO;
}
- int r = rgw_init_ioctx(store->getRados()->get_rados_handle(), store->svc()->zone->get_zone_params().log_pool, ioctx, true);
+ int r = rgw_init_ioctx(dpp, store->getRados()->get_rados_handle(), store->svc()->zone->get_zone_params().log_pool, ioctx, true);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed to open log pool (" << store->svc()->zone->get_zone_params().log_pool << " ret=" << r << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to open log pool (" << store->svc()->zone->get_zone_params().log_pool << " ret=" << r << dendl;
return r;
}
r = master_log.init();
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed to init remote log, r=" << r << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to init remote log, r=" << r << dendl;
return r;
}
RGWMetaSyncEnv& sync_env = master_log.get_sync_env();
rgw_meta_sync_status sync_status;
- r = read_sync_status(&sync_status);
+ r = read_sync_status(dpp, &sync_status);
if (r < 0 && r != -ENOENT) {
- lderr(store->ctx()) << "ERROR: failed to read sync status, r=" << r << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed to read sync status, r=" << r << dendl;
return r;
}
}
class RGWAsyncReadMDLogEntries : public RGWAsyncRadosRequest {
+ const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* store;
RGWMetadataLog *mdlog;
int shard_id;
int max_entries;
protected:
- int _send_request() override {
+ int _send_request(const DoutPrefixProvider *dpp) override {
real_time from_time;
real_time end_time;
mdlog->init_list_entries(shard_id, from_time, end_time, marker, &handle);
- int ret = mdlog->list_entries(handle, max_entries, entries, &marker, &truncated);
+ int ret = mdlog->list_entries(dpp, handle, max_entries, entries, &marker, &truncated);
mdlog->complete_list_entries(handle);
list<cls_log_entry> entries;
bool truncated;
- RGWAsyncReadMDLogEntries(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store,
+ RGWAsyncReadMDLogEntries(const DoutPrefixProvider *dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store,
RGWMetadataLog* mdlog, int _shard_id,
std::string _marker, int _max_entries)
- : RGWAsyncRadosRequest(caller, cn), store(_store), mdlog(mdlog),
+ : RGWAsyncRadosRequest(caller, cn), dpp(dpp), store(_store), mdlog(mdlog),
shard_id(_shard_id), max_entries(_max_entries), marker(std::move(_marker)) {}
};
}
}
- int send_request() override {
+ int send_request(const DoutPrefixProvider *dpp) override {
marker = *pmarker;
- req = new RGWAsyncReadMDLogEntries(this, stack->create_completion_notifier(),
+ req = new RGWAsyncReadMDLogEntries(dpp, this, stack->create_completion_notifier(),
sync_env->store, mdlog, shard_id, marker,
max_entries);
sync_env->async_rados->queue(req);
: RGWCoroutine(env->store->ctx()), env(env), http_op(NULL),
period(period), shard_id(_shard_id), shard_info(_shard_info) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
auto store = env->store;
RGWRESTConn *conn = store->svc()->zone->get_master_conn();
reenter(this) {
init_new_io(http_op);
- int ret = http_op->aio_read();
+ int ret = http_op->aio_read(dpp);
if (ret < 0) {
ldpp_dout(env->dpp, 0) << "ERROR: failed to read from " << p << dendl;
log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl;
: RGWSimpleCoroutine(env->store->ctx()), sync_env(env), http_op(NULL),
period(period), shard_id(_shard_id), marker(_marker), max_entries(_max_entries), result(_result) {}
- int send_request() override {
+ int send_request(const DoutPrefixProvider *dpp) override {
RGWRESTConn *conn = sync_env->conn;
char buf[32];
http_op = new RGWRESTReadResource(conn, p, pairs, NULL, sync_env->http_manager);
init_new_io(http_op);
- int ret = http_op->aio_read();
+ int ret = http_op->aio_read(dpp);
if (ret < 0) {
- ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to read from " << p << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to read from " << p << dendl;
log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl;
http_op->put();
return ret;
}
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
int ret;
reenter(this) {
yield {
}
while (!lease_cr->is_locked()) {
if (lease_cr->is_done()) {
- ldpp_dout(sync_env->dpp, 5) << "lease cr failed, done early " << dendl;
+ ldpp_dout(dpp, 5) << "lease cr failed, done early " << dendl;
set_status("lease lock failed, early abort");
return set_cr_error(lease_cr->get_ret_status());
}
yield {
set_status("writing sync status");
rgw::sal::RadosStore* store = sync_env->store;
- call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados, store->svc()->sysobj,
+ call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(dpp, sync_env->async_rados, store->svc()->sysobj,
rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()),
status));
}
if (retcode < 0) {
set_status("failed to write sync status");
- ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to write sync status, retcode=" << retcode << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to write sync status, retcode=" << retcode << dendl;
yield lease_cr->go_down();
return set_cr_error(retcode);
}
marker.next_step_marker = info.marker;
marker.timestamp = info.last_update;
rgw::sal::RadosStore* store = sync_env->store;
- spawn(new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->async_rados,
+ spawn(new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(dpp,
+ sync_env->async_rados,
store->svc()->sysobj,
rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->shard_obj_name(i)),
marker), true);
set_status("changing sync state: build full sync maps");
status.state = rgw_meta_sync_info::StateBuildingFullSyncMaps;
rgw::sal::RadosStore* store = sync_env->store;
- call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados, store->svc()->sysobj,
+ call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(dpp, sync_env->async_rados, store->svc()->sysobj,
rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()),
status));
}
using CR = RGWSimpleRadosReadCR<rgw_meta_sync_marker>;
rgw_raw_obj obj{env->store->svc()->zone->get_zone_params().log_pool,
env->shard_obj_name(shard_id)};
- spawn(new CR(env->async_rados, env->store->svc()->sysobj, obj, &markers[shard_id]), false);
+ spawn(new CR(env->dpp, env->async_rados, env->store->svc()->sysobj, obj, &markers[shard_id]), false);
shard_id++;
return true;
}
rgw_meta_sync_status *_status)
: RGWCoroutine(_sync_env->cct), sync_env(_sync_env), sync_status(_status)
{}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int RGWReadSyncStatusCoroutine::operate()
+int RGWReadSyncStatusCoroutine::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
// read sync info
bool empty_on_enoent = false; // fail on ENOENT
rgw_raw_obj obj{sync_env->store->svc()->zone->get_zone_params().log_pool,
sync_env->status_oid()};
- call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj, obj,
+ call(new ReadInfoCR(dpp, sync_env->async_rados, sync_env->store->svc()->sysobj, obj,
&sync_status->sync_info, empty_on_enoent));
}
if (retcode < 0) {
- ldpp_dout(sync_env->dpp, 4) << "failed to read sync status info with "
+ ldpp_dout(dpp, 4) << "failed to read sync status info with "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
yield call(new ReadMarkersCR(sync_env, sync_status->sync_info.num_shards,
sync_status->sync_markers));
if (retcode < 0) {
- ldpp_dout(sync_env->dpp, 4) << "failed to read sync status markers with "
+ ldpp_dout(dpp, 4) << "failed to read sync status markers with "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
std::back_inserter(sections));
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
RGWRESTConn *conn = sync_env->conn;
reenter(this) {
}
while (!lease_cr->is_locked()) {
if (lease_cr->is_done()) {
- ldpp_dout(sync_env->dpp, 5) << "lease cr failed, done early " << dendl;
+ ldpp_dout(dpp, 5) << "lease cr failed, done early " << dendl;
set_status("failed acquiring lock");
return set_cr_error(lease_cr->get_ret_status());
}
"/admin/metadata", NULL, §ions));
}
if (get_ret_status() < 0) {
- ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to fetch metadata sections" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to fetch metadata sections" << dendl;
yield entries_index->finish();
yield lease_cr->go_down();
drain_all();
int shard_id = (int)iter->first;
rgw_meta_sync_marker& marker = iter->second;
marker.total_entries = entries_index->get_total_entries(shard_id);
- spawn(new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->async_rados, sync_env->store->svc()->sysobj,
+ spawn(new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(dpp, sync_env->async_rados, sync_env->store->svc()->sysobj,
rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, sync_env->shard_obj_name(shard_id)),
marker), true);
}
section + ":" + key);
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
RGWRESTConn *conn = sync_env->conn;
reenter(this) {
yield {
init_new_io(http_op);
- int ret = http_op->aio_read();
+ int ret = http_op->aio_read(dpp);
if (ret < 0) {
- ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to fetch mdlog data" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to fetch mdlog data" << dendl;
log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl;
http_op->put();
return set_cr_error(ret);
bufferlist bl;
const DoutPrefixProvider *dpp;
protected:
- int _send_request() override {
+ int _send_request(const DoutPrefixProvider *dpp) override {
int ret = store->ctl()->meta.mgr->put(raw_key, bl, null_yield, dpp, RGWMDLogSyncType::APPLY_ALWAYS, true);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: can't store key: " << raw_key << " ret=" << ret << dendl;
}
}
- int send_request() override {
+ int send_request(const DoutPrefixProvider *dpp) override {
req = new RGWAsyncMetaStoreEntry(this, stack->create_completion_notifier(),
- sync_env->store, raw_key, bl, sync_env->dpp);
+ sync_env->store, raw_key, bl, dpp);
sync_env->async_rados->queue(req);
return 0;
}
string raw_key;
const DoutPrefixProvider *dpp;
protected:
- int _send_request() override {
+ int _send_request(const DoutPrefixProvider *dpp) override {
int ret = store->ctl()->meta.mgr->remove(raw_key, null_yield, dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: can't remove key: " << raw_key << " ret=" << ret << dendl;
}
}
- int send_request() override {
+ int send_request(const DoutPrefixProvider *dpp) override {
req = new RGWAsyncMetaRemoveEntry(this, stack->create_completion_notifier(),
- sync_env->store, raw_key, sync_env->dpp);
+ sync_env->store, raw_key, dpp);
sync_env->async_rados->queue(req);
return 0;
}
#define META_SYNC_UPDATE_MARKER_WINDOW 10
-int RGWLastCallerWinsCR::operate() {
+int RGWLastCallerWinsCR::operate(const DoutPrefixProvider *dpp) {
RGWCoroutine *call_cr;
reenter(this) {
while (cr) {
ldpp_dout(sync_env->dpp, 20) << __func__ << "(): updating marker marker_oid=" << marker_oid << " marker=" << new_marker << " realm_epoch=" << sync_marker.realm_epoch << dendl;
tn->log(20, SSTR("new marker=" << new_marker));
rgw::sal::RadosStore* store = sync_env->store;
- return new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->async_rados,
+ return new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->dpp, sync_env->async_rados,
store->svc()->sysobj,
rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, marker_oid),
sync_marker);
tn = sync_env->sync_tracer->add_node(_tn_parent, "entry", raw_key);
}
-int RGWMetaSyncSingleEntryCR::operate() {
+int RGWMetaSyncSingleEntryCR::operate(const DoutPrefixProvider *dpp) {
reenter(this) {
#define NUM_TRANSIENT_ERROR_RETRIES 10
}
if ((sync_status == -EAGAIN || sync_status == -ECANCELED) && (tries < NUM_TRANSIENT_ERROR_RETRIES - 1)) {
- ldpp_dout(sync_env->dpp, 20) << *this << ": failed to fetch remote metadata: " << section << ":" << key << ", will retry" << dendl;
+ ldpp_dout(dpp, 20) << *this << ": failed to fetch remote metadata: " << section << ":" << key << ", will retry" << dendl;
continue;
}
if (sync_status < 0) {
tn->log(10, SSTR("failed to send read remote metadata entry: section=" << section << " key=" << key << " status=" << sync_status));
log_error() << "failed to send read remote metadata entry: section=" << section << " key=" << key << " status=" << sync_status << std::endl;
- yield call(sync_env->error_logger->log_error_cr(sync_env->conn->get_remote_id(), section, key, -sync_status,
+ yield call(sync_env->error_logger->log_error_cr(dpp, sync_env->conn->get_remote_id(), section, key, -sync_status,
string("failed to read remote metadata entry: ") + cpp_strerror(-sync_status)));
return set_cr_error(sync_status);
}
yield call(new RGWMetaRemoveEntryCR(sync_env, raw_key));
}
if ((retcode == -EAGAIN || retcode == -ECANCELED) && (tries < NUM_TRANSIENT_ERROR_RETRIES - 1)) {
- ldpp_dout(sync_env->dpp, 20) << *this << ": failed to store metadata: " << section << ":" << key << ", got retcode=" << retcode << dendl;
+ ldpp_dout(dpp, 20) << *this << ": failed to store metadata: " << section << ":" << key << ", got retcode=" << retcode << dendl;
continue;
}
break;
}
}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
int state_init();
int state_read_shard_status();
int state_read_shard_status_complete();
- int state_send_rest_request();
+ int state_send_rest_request(const DoutPrefixProvider *dpp);
int state_receive_rest_response();
int state_store_mdlog_entries();
int state_store_mdlog_entries_complete();
marker_tracker = mt;
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
int r;
while (true) {
switch (sync_marker.state) {
case rgw_meta_sync_marker::FullSync:
r = full_sync();
if (r < 0) {
- ldpp_dout(sync_env->dpp, 10) << "sync: full_sync: shard_id=" << shard_id << " r=" << r << dendl;
+ ldpp_dout(dpp, 10) << "sync: full_sync: shard_id=" << shard_id << " r=" << r << dendl;
return set_cr_error(r);
}
return 0;
case rgw_meta_sync_marker::IncrementalSync:
r = incremental_sync();
if (r < 0) {
- ldpp_dout(sync_env->dpp, 10) << "sync: incremental_sync: shard_id=" << shard_id << " r=" << r << dendl;
+ ldpp_dout(dpp, 10) << "sync: incremental_sync: shard_id=" << shard_id << " r=" << r << dendl;
return set_cr_error(r);
}
return 0;
ldpp_dout(sync_env->dpp, 4) << *this << ": saving marker pos=" << temp_marker->marker << " realm_epoch=" << realm_epoch << dendl;
using WriteMarkerCR = RGWSimpleRadosWriteCR<rgw_meta_sync_marker>;
- yield call(new WriteMarkerCR(sync_env->async_rados, sync_env->store->svc()->sysobj,
+ yield call(new WriteMarkerCR(sync_env->dpp, sync_env->async_rados, sync_env->store->svc()->sysobj,
rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)),
*temp_marker));
}
RGWCoroutine *alloc_finisher_cr() override {
rgw::sal::RadosStore* store = sync_env->store;
- return new RGWSimpleRadosReadCR<rgw_meta_sync_marker>(sync_env->async_rados, store->svc()->sysobj,
+ return new RGWSimpleRadosReadCR<rgw_meta_sync_marker>(sync_env->dpp, sync_env->async_rados, store->svc()->sysobj,
rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)),
&sync_marker);
}
~RGWMetaSyncCR() {
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
// loop through one period at a time
tn->log(1, "start");
if (cursor == sync_env->store->svc()->mdlog->get_period_history()->get_current()) {
next = RGWPeriodHistory::Cursor{};
if (cursor) {
- ldpp_dout(sync_env->dpp, 10) << "RGWMetaSyncCR on current period="
+ ldpp_dout(dpp, 10) << "RGWMetaSyncCR on current period="
<< cursor.get_period().get_id() << dendl;
} else {
- ldpp_dout(sync_env->dpp, 10) << "RGWMetaSyncCR with no period" << dendl;
+ ldpp_dout(dpp, 10) << "RGWMetaSyncCR with no period" << dendl;
}
} else {
next = cursor;
next.next();
- ldpp_dout(sync_env->dpp, 10) << "RGWMetaSyncCR on period="
+ ldpp_dout(dpp, 10) << "RGWMetaSyncCR on period="
<< cursor.get_period().get_id() << ", next="
<< next.get_period().get_id() << dendl;
}
period_marker = next.get_period().get_sync_status()[shard_id];
if (period_marker.empty()) {
// no metadata changes have occurred on this shard, skip it
- ldpp_dout(sync_env->dpp, 10) << "RGWMetaSyncCR: skipping shard " << shard_id
+ ldpp_dout(dpp, 10) << "RGWMetaSyncCR: skipping shard " << shard_id
<< " with empty period marker" << dendl;
continue;
}
// write the updated sync info
sync_status.sync_info.period = cursor.get_period().get_id();
sync_status.sync_info.realm_epoch = cursor.get_epoch();
- yield call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados,
+ yield call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(dpp, sync_env->async_rados,
sync_env->store->svc()->sysobj,
rgw_raw_obj(pool, sync_env->status_oid()),
sync_status.sync_info));
env->sync_tracer = store->getRados()->get_sync_tracer();
}
-int RGWRemoteMetaLog::read_sync_status(rgw_meta_sync_status *sync_status)
+int RGWRemoteMetaLog::read_sync_status(const DoutPrefixProvider *dpp, rgw_meta_sync_status *sync_status)
{
if (store->svc()->zone->is_meta_master()) {
return 0;
RGWMetaSyncEnv sync_env_local = sync_env;
sync_env_local.http_manager = &http_manager;
tn->log(20, "read sync status");
- ret = crs.run(new RGWReadSyncStatusCoroutine(&sync_env_local, sync_status));
+ ret = crs.run(dpp, new RGWReadSyncStatusCoroutine(&sync_env_local, sync_status));
http_manager.stop();
return ret;
}
-int RGWRemoteMetaLog::init_sync_status()
+int RGWRemoteMetaLog::init_sync_status(const DoutPrefixProvider *dpp)
{
if (store->svc()->zone->is_meta_master()) {
return 0;
}
rgw_mdlog_info mdlog_info;
- int r = read_log_info(&mdlog_info);
+ int r = read_log_info(dpp, &mdlog_info);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: fail to fetch master log info (r=" << r << ")" << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: fail to fetch master log info (r=" << r << ")" << dendl;
return r;
}
sync_info.realm_epoch = cursor.get_epoch();
}
- return run(new RGWInitSyncStatusCoroutine(&sync_env, sync_info));
+ return run(dpp, new RGWInitSyncStatusCoroutine(&sync_env, sync_info));
}
-int RGWRemoteMetaLog::store_sync_info(const rgw_meta_sync_info& sync_info)
+int RGWRemoteMetaLog::store_sync_info(const DoutPrefixProvider *dpp, const rgw_meta_sync_info& sync_info)
{
tn->log(20, "store sync info");
- return run(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(async_rados, store->svc()->sysobj,
+ return run(dpp, new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(dpp, async_rados, store->svc()->sysobj,
rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env.status_oid()),
sync_info));
}
// return a cursor to the period at our sync position
-static RGWPeriodHistory::Cursor get_period_at(rgw::sal::RadosStore* store,
+static RGWPeriodHistory::Cursor get_period_at(const DoutPrefixProvider *dpp,
+ rgw::sal::RadosStore* store,
const rgw_meta_sync_info& info,
optional_yield y)
{
// verify that the period ids match
auto& existing = cursor.get_period().get_id();
if (existing != info.period) {
- lderr(store->ctx()) << "ERROR: sync status period=" << info.period
+ ldpp_dout(dpp, -1) << "ERROR: sync status period=" << info.period
<< " does not match period=" << existing
<< " in history at realm epoch=" << info.realm_epoch << dendl;
return RGWPeriodHistory::Cursor{-EEXIST};
// read the period from rados or pull it from the master
RGWPeriod period;
- int r = store->svc()->mdlog->pull_period(info.period, period, y);
+ int r = store->svc()->mdlog->pull_period(dpp, info.period, period, y);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed to read period id "
+ ldpp_dout(dpp, -1) << "ERROR: failed to read period id "
<< info.period << ": " << cpp_strerror(r) << dendl;
return RGWPeriodHistory::Cursor{r};
}
// attach the period to our history
- cursor = store->svc()->mdlog->get_period_history()->attach(std::move(period), y);
+ cursor = store->svc()->mdlog->get_period_history()->attach(dpp, std::move(period), y);
if (!cursor) {
r = cursor.get_error();
- lderr(store->ctx()) << "ERROR: failed to read period history back to "
+ ldpp_dout(dpp, -1) << "ERROR: failed to read period history back to "
<< info.period << ": " << cpp_strerror(r) << dendl;
}
return cursor;
}
-int RGWRemoteMetaLog::run_sync(optional_yield y)
+int RGWRemoteMetaLog::run_sync(const DoutPrefixProvider *dpp, optional_yield y)
{
if (store->svc()->zone->is_meta_master()) {
return 0;
ldpp_dout(dpp, 1) << __func__ << "(): going down" << dendl;
return 0;
}
- r = read_log_info(&mdlog_info);
+ r = read_log_info(dpp, &mdlog_info);
if (r == -EIO || r == -ENOENT) {
// keep retrying if master isn't alive or hasn't initialized the log
ldpp_dout(dpp, 10) << __func__ << "(): waiting for master.." << dendl;
}
backoff.reset();
if (r < 0) {
- lderr(store->ctx()) << "ERROR: fail to fetch master log info (r=" << r << ")" << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: fail to fetch master log info (r=" << r << ")" << dendl;
return r;
}
break;
ldpp_dout(dpp, 1) << __func__ << "(): going down" << dendl;
return 0;
}
- r = run(new RGWReadSyncStatusCoroutine(&sync_env, &sync_status));
+ r = run(dpp, new RGWReadSyncStatusCoroutine(&sync_env, &sync_status));
if (r < 0 && r != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: failed to fetch sync status r=" << r << dendl;
return r;
sync_status.sync_info.period = cursor.get_period().get_id();
sync_status.sync_info.realm_epoch = cursor.get_epoch();
}
- r = run(new RGWInitSyncStatusCoroutine(&sync_env, sync_status.sync_info));
+ r = run(dpp, new RGWInitSyncStatusCoroutine(&sync_env, sync_status.sync_info));
if (r == -EBUSY) {
backoff.backoff_sleep();
continue;
auto num_shards = sync_status.sync_info.num_shards;
if (num_shards != mdlog_info.num_shards) {
- lderr(store->ctx()) << "ERROR: can't sync, mismatch between num shards, master num_shards=" << mdlog_info.num_shards << " local num_shards=" << num_shards << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: can't sync, mismatch between num shards, master num_shards=" << mdlog_info.num_shards << " local num_shards=" << num_shards << dendl;
return -EINVAL;
}
RGWPeriodHistory::Cursor cursor;
do {
- r = run(new RGWReadSyncStatusCoroutine(&sync_env, &sync_status));
+ r = run(dpp, new RGWReadSyncStatusCoroutine(&sync_env, &sync_status));
if (r < 0 && r != -ENOENT) {
tn->log(0, SSTR("ERROR: failed to fetch sync status r=" << r));
return r;
switch ((rgw_meta_sync_info::SyncState)sync_status.sync_info.state) {
case rgw_meta_sync_info::StateBuildingFullSyncMaps:
tn->log(20, "building full sync maps");
- r = run(new RGWFetchAllMetaCR(&sync_env, num_shards, sync_status.sync_markers, tn));
+ r = run(dpp, new RGWFetchAllMetaCR(&sync_env, num_shards, sync_status.sync_markers, tn));
if (r == -EBUSY || r == -EAGAIN) {
backoff.backoff_sleep();
continue;
}
sync_status.sync_info.state = rgw_meta_sync_info::StateSync;
- r = store_sync_info(sync_status.sync_info);
+ r = store_sync_info(dpp, sync_status.sync_info);
if (r < 0) {
tn->log(0, SSTR("ERROR: failed to update sync status (r=" << r << ")"));
return r;
case rgw_meta_sync_info::StateSync:
tn->log(20, "sync");
// find our position in the period history (if any)
- cursor = get_period_at(store, sync_status.sync_info, y);
+ cursor = get_period_at(dpp, store, sync_status.sync_info, y);
r = cursor.get_error();
if (r < 0) {
return r;
}
meta_sync_cr = new RGWMetaSyncCR(&sync_env, cursor, sync_status, tn);
- r = run(meta_sync_cr);
+ r = run(dpp, meta_sync_cr);
if (r < 0) {
tn->log(0, "ERROR: failed to fetch all metadata keys");
return r;
meta_sync_cr->wakeup(shard_id);
}
-int RGWCloneMetaLogCoroutine::operate()
+int RGWCloneMetaLogCoroutine::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
do {
yield {
- ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": init request" << dendl;
+ ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": init request" << dendl;
return state_init();
}
yield {
- ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": reading shard status" << dendl;
+ ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": reading shard status" << dendl;
return state_read_shard_status();
}
yield {
- ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": reading shard status complete" << dendl;
+ ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": reading shard status complete" << dendl;
return state_read_shard_status_complete();
}
yield {
- ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": sending rest request" << dendl;
- return state_send_rest_request();
+ ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": sending rest request" << dendl;
+ return state_send_rest_request(dpp);
}
yield {
- ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": receiving rest response" << dendl;
+ ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": receiving rest response" << dendl;
return state_receive_rest_response();
}
yield {
- ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": storing mdlog entries" << dendl;
+ ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": storing mdlog entries" << dendl;
return state_store_mdlog_entries();
}
} while (truncated);
yield {
- ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": storing mdlog entries complete" << dendl;
+ ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": storing mdlog entries complete" << dendl;
return state_store_mdlog_entries_complete();
}
}
io_complete();
}), add_ref);
- int ret = mdlog->get_info_async(shard_id, completion.get());
+ int ret = mdlog->get_info_async(sync_env->dpp, shard_id, completion.get());
if (ret < 0) {
ldpp_dout(sync_env->dpp, 0) << "ERROR: mdlog->get_info_async() returned ret=" << ret << dendl;
return set_cr_error(ret);
return 0;
}
-int RGWCloneMetaLogCoroutine::state_send_rest_request()
+int RGWCloneMetaLogCoroutine::state_send_rest_request(const DoutPrefixProvider *dpp)
{
RGWRESTConn *conn = sync_env->conn;
init_new_io(http_op);
- int ret = http_op->aio_read();
+ int ret = http_op->aio_read(dpp);
if (ret < 0) {
- ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to fetch mdlog data" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to fetch mdlog data" << dendl;
log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl;
http_op->put();
http_op = NULL;
RGWAioCompletionNotifier *cn = stack->create_completion_notifier();
- int ret = mdlog->store_entries_in_shard(dest_entries, shard_id, cn->completion());
+ int ret = mdlog->store_entries_in_shard(sync_env->dpp, dest_entries, shard_id, cn->completion());
if (ret < 0) {
cn->put();
ldpp_dout(sync_env->dpp, 10) << "failed to store md log entries shard_id=" << shard_id << " ret=" << ret << dendl;
std::atomic<int64_t> counter = { 0 };
public:
RGWSyncErrorLogger(rgw::sal::RadosStore* _store, const string &oid_prefix, int _num_shards);
- RGWCoroutine *log_error_cr(const string& source_zone, const string& section, const string& name, uint32_t error_code, const string& message);
+ RGWCoroutine *log_error_cr(const DoutPrefixProvider *dpp, const string& source_zone, const string& section, const string& name, uint32_t error_code, const string& message);
static string get_shard_oid(const string& oid_prefix, int shard_id);
};
virtual RGWCoroutine *alloc_cr() = 0;
virtual RGWCoroutine *alloc_finisher_cr() { return NULL; }
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
struct RGWMetaSyncEnv {
RGWMetaSyncEnv sync_env;
void init_sync_env(RGWMetaSyncEnv *env);
- int store_sync_info(const rgw_meta_sync_info& sync_info);
+ int store_sync_info(const DoutPrefixProvider *dpp, const rgw_meta_sync_info& sync_info);
std::atomic<bool> going_down = { false };
int init();
void finish();
- int read_log_info(rgw_mdlog_info *log_info);
- int read_master_log_shards_info(const string& master_period, map<int, RGWMetadataLogInfo> *shards_info);
- int read_master_log_shards_next(const string& period, map<int, string> shard_markers, map<int, rgw_mdlog_shard_data> *result);
- int read_sync_status(rgw_meta_sync_status *sync_status);
- int init_sync_status();
- int run_sync(optional_yield y);
+ int read_log_info(const DoutPrefixProvider *dpp, rgw_mdlog_info *log_info);
+ int read_master_log_shards_info(const DoutPrefixProvider *dpp, const string& master_period, map<int, RGWMetadataLogInfo> *shards_info);
+ int read_master_log_shards_next(const DoutPrefixProvider *dpp, const string& period, map<int, string> shard_markers, map<int, rgw_mdlog_shard_data> *result);
+ int read_sync_status(const DoutPrefixProvider *dpp, rgw_meta_sync_status *sync_status);
+ int init_sync_status(const DoutPrefixProvider *dpp);
+ int run_sync(const DoutPrefixProvider *dpp, optional_yield y);
void wakeup(int shard_id);
RGWMetaSyncStatusManager(rgw::sal::RadosStore* _store, RGWAsyncRadosProcessor *async_rados)
: store(_store), master_log(this, store, async_rados, this)
{}
- int init();
+ int init(const DoutPrefixProvider *dpp);
- int read_sync_status(rgw_meta_sync_status *sync_status) {
- return master_log.read_sync_status(sync_status);
+ int read_sync_status(const DoutPrefixProvider *dpp, rgw_meta_sync_status *sync_status) {
+ return master_log.read_sync_status(dpp, sync_status);
}
- int init_sync_status() { return master_log.init_sync_status(); }
- int read_log_info(rgw_mdlog_info *log_info) {
- return master_log.read_log_info(log_info);
+ int init_sync_status(const DoutPrefixProvider *dpp) { return master_log.init_sync_status(dpp); }
+ int read_log_info(const DoutPrefixProvider *dpp, rgw_mdlog_info *log_info) {
+ return master_log.read_log_info(dpp, log_info);
}
- int read_master_log_shards_info(const string& master_period, map<int, RGWMetadataLogInfo> *shards_info) {
- return master_log.read_master_log_shards_info(master_period, shards_info);
+ int read_master_log_shards_info(const DoutPrefixProvider *dpp, const string& master_period, map<int, RGWMetadataLogInfo> *shards_info) {
+ return master_log.read_master_log_shards_info(dpp, master_period, shards_info);
}
- int read_master_log_shards_next(const string& period, map<int, string> shard_markers, map<int, rgw_mdlog_shard_data> *result) {
- return master_log.read_master_log_shards_next(period, shard_markers, result);
+ int read_master_log_shards_next(const DoutPrefixProvider *dpp, const string& period, map<int, string> shard_markers, map<int, rgw_mdlog_shard_data> *result) {
+ return master_log.read_master_log_shards_next(dpp, period, shard_markers, result);
}
- int run(optional_yield y) { return master_log.run_sync(y); }
+ int run(const DoutPrefixProvider *dpp, optional_yield y) { return master_log.run_sync(dpp, y); }
// implements DoutPrefixProvider
}
}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
void call_cr(RGWCoroutine *_cr) override {
if (cr) {
const RGWMDLogStatus& _op_status,
RGWMetaSyncShardMarkerTrack *_marker_tracker, const RGWSyncTraceNodeRef& _tn_parent);
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
class RGWShardCollectCR : public RGWCoroutine {
status(0) {}
virtual bool spawn_next() = 0;
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
// factory functions for meta sync coroutines needed in mdlog trimming
return 0;
}
-int source_bilog_markers(RGWSI_Zone* zone_svc,
+int source_bilog_markers(const DoutPrefixProvider *dpp,
+ RGWSI_Zone* zone_svc,
const rgw_sync_bucket_pipe& pipe,
BucketIndexShardsManager& remote_markers,
optional_yield y)
return -EINVAL;
}
- return rgw_read_remote_bilog_info(conn->second, *pipe.source.bucket,
+ return rgw_read_remote_bilog_info(dpp, conn->second, *pipe.source.bucket,
remote_markers, y);
}
// fetch remote markers
spawn::spawn(ioctx, [&] (spawn::yield_context yield) {
auto y = optional_yield{ioctx, yield};
- int r = source_bilog_markers(store->svc()->zone, entry.pipe,
+ int r = source_bilog_markers(dpp, store->svc()->zone, entry.pipe,
entry.remote_markers, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to fetch remote bilog markers: "
key(key), timestamp(timestamp)
{}
- int send_request() override {
+ int send_request(const DoutPrefixProvider *dpp) override {
librados::ObjectWriteOperation op;
int r = rgw_error_repo_write(op, key, timestamp);
if (r < 0) {
return r;
}
- r = obj.open();
+ r = obj.open(dpp);
if (r < 0) {
return r;
}
key(key), timestamp(timestamp)
{}
- int send_request() override {
+ int send_request(const DoutPrefixProvider *dpp) override {
librados::ObjectWriteOperation op;
int r = rgw_error_repo_remove(op, key, timestamp);
if (r < 0) {
return r;
}
- r = obj.open();
+ r = obj.open(dpp);
if (r < 0) {
return r;
}
src_bucket(_src_bucket), key(_key) {
}
-int RGWCallStatRemoteObjCR::operate() {
+int RGWCallStatRemoteObjCR::operate(const DoutPrefixProvider *dpp) {
reenter(this) {
yield {
call(new RGWStatRemoteObjCR(sync_env->async_rados, sync_env->store,
src_bucket, key, &mtime, &size, &etag, &attrs, &headers));
}
if (retcode < 0) {
- ldout(sync_env->cct, 10) << "RGWStatRemoteObjCR() returned " << retcode << dendl;
+ ldpp_dout(dpp, 10) << "RGWStatRemoteObjCR() returned " << retcode << dendl;
return set_cr_error(retcode);
}
- ldout(sync_env->cct, 20) << "stat of remote obj: z=" << sc->source_zone
+ ldpp_dout(dpp, 20) << "stat of remote obj: z=" << sc->source_zone
<< " b=" << src_bucket << " k=" << key
<< " size=" << size << " mtime=" << mtime << dendl;
yield {
}
}
if (retcode < 0) {
- ldout(sync_env->cct, 10) << "RGWStatRemoteObjCR() callback returned " << retcode << dendl;
+ ldpp_dout(dpp, 10) << "RGWStatRemoteObjCR() callback returned " << retcode << dendl;
return set_cr_error(retcode);
}
return set_cr_done();
~RGWCallStatRemoteObjCR() override {}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
virtual RGWStatRemoteObjCBCR *allocate_callback() {
return nullptr;
src_properties(_src_properties) {
}
- int init() override {
+ int init(const DoutPrefixProvider *dpp) override {
/* init input connection */
}
RGWRESTStreamRWRequest *in_req;
- int ret = conn->get_obj(src_obj, req_params, false /* send */, &in_req);
+ int ret = conn->get_obj(dpp, src_obj, req_params, false /* send */, &in_req);
if (ret < 0) {
- ldout(sc->cct, 0) << "ERROR: " << __func__ << "(): conn->get_obj() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): conn->get_obj() returned ret=" << ret << dendl;
return ret;
}
set_req(in_req);
- return RGWStreamReadHTTPResourceCRF::init();
+ return RGWStreamReadHTTPResourceCRF::init(dpp);
}
int decode_rest_obj(map<string, string>& headers, bufferlist& extra_data) override {
}
}
- void send_ready(const rgw_rest_obj& rest_obj) override {
+ void send_ready(const DoutPrefixProvider *dpp, const rgw_rest_obj& rest_obj) override {
RGWRESTStreamS3PutObj *r = static_cast<RGWRESTStreamS3PutObj *>(req);
map<string, string> new_attrs;
RGWAccessControlPolicy policy;
- r->send_ready(target->conn->get_key(), new_attrs, policy);
+ r->send_ready(dpp, target->conn->get_key(), new_attrs, policy);
}
void handle_headers(const map<string, string>& headers) {
dest_obj(_dest_obj),
src_properties(_src_properties) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
/* init input */
in_crf.reset(new RGWRESTStreamGetCRF(cct, get_env(), this, sc,
part_info(_part_info),
petag(_petag) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
/* init input */
in_crf.reset(new RGWRESTStreamGetCRF(cct, get_env(), this, sc,
dest_obj(_dest_obj),
upload_id(_upload_id) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield {
}
if (retcode < 0) {
- ldout(sc->cct, 0) << "ERROR: failed to abort multipart upload for dest object=" << dest_obj << " (retcode=" << retcode << ")" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to abort multipart upload for dest object=" << dest_obj << " (retcode=" << retcode << ")" << dendl;
return set_cr_error(retcode);
}
attrs(_attrs),
upload_id(_upload_id) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield {
}
if (retcode < 0) {
- ldout(sc->cct, 0) << "ERROR: failed to initialize multipart upload for dest object=" << dest_obj << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to initialize multipart upload for dest object=" << dest_obj << dendl;
return set_cr_error(retcode);
}
{
}
}
- ldout(sc->cct, 20) << "init multipart result: bucket=" << result.bucket << " key=" << result.key << " upload_id=" << result.upload_id << dendl;
+ ldpp_dout(dpp, 20) << "init multipart result: bucket=" << result.bucket << " key=" << result.key << " upload_id=" << result.upload_id << dendl;
*upload_id = result.upload_id;
upload_id(_upload_id),
req_enc(_parts) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield {
}
if (retcode < 0) {
- ldout(sc->cct, 0) << "ERROR: failed to initialize multipart upload for dest object=" << dest_obj << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to initialize multipart upload for dest object=" << dest_obj << dendl;
return set_cr_error(retcode);
}
{
}
}
- ldout(sc->cct, 20) << "complete multipart result: location=" << result.location << " bucket=" << result.bucket << " key=" << result.key << " etag=" << result.etag << dendl;
+ ldpp_dout(dpp, 20) << "complete multipart result: location=" << result.location << " bucket=" << result.bucket << " key=" << result.key << " etag=" << result.etag << dendl;
return set_cr_done();
}
status_obj(_status_obj),
upload_id(_upload_id) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield call(new RGWAWSAbortMultipartCR(sc, dest_conn, dest_obj, upload_id));
if (retcode < 0) {
- ldout(sc->cct, 0) << "ERROR: failed to abort multipart upload dest obj=" << dest_obj << " upload_id=" << upload_id << " retcode=" << retcode << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to abort multipart upload dest obj=" << dest_obj << " upload_id=" << upload_id << " retcode=" << retcode << dendl;
/* ignore error, best effort */
}
yield call(new RGWRadosRemoveCR(sc->env->store, status_obj));
if (retcode < 0) {
- ldout(sc->cct, 0) << "ERROR: failed to remove sync status obj obj=" << status_obj << " retcode=" << retcode << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to remove sync status obj obj=" << status_obj << " retcode=" << retcode << dendl;
/* ignore error, best effort */
}
return set_cr_done();
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
- yield call(new RGWSimpleRadosReadCR<rgw_sync_aws_multipart_upload_info>(sync_env->async_rados, sync_env->svc->sysobj,
+ yield call(new RGWSimpleRadosReadCR<rgw_sync_aws_multipart_upload_info>(dpp, sync_env->async_rados, sync_env->svc->sysobj,
status_obj, &status, false));
if (retcode < 0 && retcode != -ENOENT) {
- ldout(sc->cct, 0) << "ERROR: failed to read sync status of object " << src_obj << " retcode=" << retcode << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to read sync status of object " << src_obj << " retcode=" << retcode << dendl;
return retcode;
}
}
if (retcode < 0) {
- ldout(sc->cct, 0) << "ERROR: failed to sync obj=" << src_obj << ", sync via multipart upload, upload_id=" << status.upload_id << " part number " << status.cur_part << " (error: " << cpp_strerror(-retcode) << ")" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to sync obj=" << src_obj << ", sync via multipart upload, upload_id=" << status.upload_id << " part number " << status.cur_part << " (error: " << cpp_strerror(-retcode) << ")" << dendl;
ret_err = retcode;
yield call(new RGWAWSStreamAbortMultipartUploadCR(sc, target->conn.get(), dest_obj, status_obj, status.upload_id));
return set_cr_error(ret_err);
}
- yield call(new RGWSimpleRadosWriteCR<rgw_sync_aws_multipart_upload_info>(sync_env->async_rados, sync_env->svc->sysobj, status_obj, status));
+ yield call(new RGWSimpleRadosWriteCR<rgw_sync_aws_multipart_upload_info>(dpp, sync_env->async_rados, sync_env->svc->sysobj, status_obj, status));
if (retcode < 0) {
- ldout(sc->cct, 0) << "ERROR: failed to store multipart upload state, retcode=" << retcode << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to store multipart upload state, retcode=" << retcode << dendl;
/* continue with upload anyway */
}
ldout(sc->cct, 20) << "sync of object=" << src_obj << " via multipart upload, finished sending part #" << status.cur_part << " etag=" << pcur_part_info->etag << dendl;
yield call(new RGWAWSCompleteMultipartCR(sc, target->conn.get(), dest_obj, status.upload_id, status.parts));
if (retcode < 0) {
- ldout(sc->cct, 0) << "ERROR: failed to complete multipart upload of obj=" << src_obj << " (error: " << cpp_strerror(-retcode) << ")" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to complete multipart upload of obj=" << src_obj << " (error: " << cpp_strerror(-retcode) << ")" << dendl;
ret_err = retcode;
yield call(new RGWAWSStreamAbortMultipartUploadCR(sc, target->conn.get(), dest_obj, status_obj, status.upload_id));
return set_cr_error(ret_err);
/* remove status obj */
yield call(new RGWRadosRemoveCR(sync_env->store, status_obj));
if (retcode < 0) {
- ldout(sc->cct, 0) << "ERROR: failed to abort multipart upload obj=" << src_obj << " upload_id=" << status.upload_id << " part number " << status.cur_part << " (" << cpp_strerror(-retcode) << ")" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to abort multipart upload obj=" << src_obj << " upload_id=" << status.upload_id << " part number " << status.cur_part << " (" << cpp_strerror(-retcode) << ")" << dendl;
/* ignore error, best effort */
}
return set_cr_done();
~RGWAWSHandleRemoteObjCBCR(){
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
ret = decode_attr(attrs, RGW_ATTR_PG_VER, &src_pg_ver, (uint64_t)0);
if (ret < 0) {
src_pg_ver = 0; /* all or nothing */
}
}
- ldout(sc->cct, 4) << "AWS: download begin: z=" << sc->source_zone
+ ldpp_dout(dpp, 4) << "AWS: download begin: z=" << sc->source_zone
<< " b=" << src_bucket << " k=" << key << " size=" << size
<< " mtime=" << mtime << " etag=" << etag
<< " zone_short_id=" << src_zone_short_id << " pg_ver=" << src_pg_ver
AWSSyncInstanceEnv& _instance) : RGWCoroutine(_sc->cct), sc(_sc),
sync_pipe(_sync_pipe), key(_key),
mtime(_mtime), instance(_instance) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
ldout(sc->cct, 0) << ": remove remote obj: z=" << sc->source_zone
<< " b=" <<sync_pipe.info.source_bs.bucket << " k=" << key << " mtime=" << mtime << dendl;
ElasticConfigRef _conf) : RGWCoroutine(_sc->cct),
sc(_sc), sync_env(_sc->env),
conf(_conf) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
- ldout(sync_env->cct, 5) << conf->id << ": get elasticsearch info for zone: " << sc->source_zone << dendl;
+ ldpp_dout(dpp, 5) << conf->id << ": get elasticsearch info for zone: " << sc->source_zone << dendl;
yield call(new RGWReadRESTResourceCR<ESInfo> (sync_env->cct,
conf->conn.get(),
sync_env->http_manager,
&(conf->default_headers),
&(conf->es_info)));
if (retcode < 0) {
- ldout(sync_env->cct, 5) << conf->id << ": get elasticsearch failed: " << retcode << dendl;
+ ldpp_dout(dpp, 5) << conf->id << ": get elasticsearch failed: " << retcode << dendl;
return set_cr_error(retcode);
}
- ldout(sync_env->cct, 5) << conf->id << ": got elastic version=" << conf->es_info.get_version_str() << dendl;
+ ldpp_dout(dpp, 5) << conf->id << ": got elastic version=" << conf->es_info.get_version_str() << dendl;
return set_cr_done();
}
return 0;
ElasticConfigRef _conf) : RGWCoroutine(_sc->cct),
sc(_sc), sync_env(_sc->env),
conf(_conf) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
- ldout(sc->cct, 5) << conf->id << ": put elasticsearch index for zone: " << sc->source_zone << dendl;
+ ldpp_dout(dpp, 5) << conf->id << ": put elasticsearch index for zone: " << sc->source_zone << dendl;
yield {
string path = conf->get_index_path();
std::unique_ptr<es_index_config_base> index_conf;
if (conf->es_info.version >= ES_V5) {
- ldout(sc->cct, 0) << "elasticsearch: index mapping: version >= 5" << dendl;
+ ldpp_dout(dpp, 0) << "elasticsearch: index mapping: version >= 5" << dendl;
index_conf.reset(new es_index_config<es_type_v5>(settings, conf->es_info.version));
} else {
- ldout(sc->cct, 0) << "elasticsearch: index mapping: version < 5" << dendl;
+ ldpp_dout(dpp, 0) << "elasticsearch: index mapping: version < 5" << dendl;
index_conf.reset(new es_index_config<es_type_v2>(settings, conf->es_info.version));
}
call(new RGWPutRESTResourceCR<es_index_config_base, int, _err_response> (sc->cct,
if (err_response.error.type != "index_already_exists_exception" &&
err_response.error.type != "resource_already_exists_exception") {
- ldout(sync_env->cct, 0) << "elasticsearch: failed to initialize index: response.type=" << err_response.error.type << " response.reason=" << err_response.error.reason << dendl;
+ ldpp_dout(dpp, 0) << "elasticsearch: failed to initialize index: response.type=" << err_response.error.type << " response.reason=" << err_response.error.reason << dendl;
return set_cr_error(retcode);
}
- ldout(sync_env->cct, 0) << "elasticsearch: index already exists, assuming external initialization" << dendl;
+ ldpp_dout(dpp, 0) << "elasticsearch: index already exists, assuming external initialization" << dendl;
}
return set_cr_done();
}
ElasticConfigRef _conf) : RGWCoroutine(_sc->cct),
sc(_sc), sync_env(_sc->env),
conf(_conf) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield call(new RGWElasticGetESInfoCBCR(sc, conf));
ElasticConfigRef _conf, uint64_t _versioned_epoch) : RGWStatRemoteObjCBCR(_sc, _sync_pipe.info.source_bs.bucket, _key),
sync_pipe(_sync_pipe), conf(_conf),
versioned_epoch(_versioned_epoch) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
- ldout(sync_env->cct, 10) << ": stat of remote obj: z=" << sc->source_zone
+ ldpp_dout(dpp, 10) << ": stat of remote obj: z=" << sc->source_zone
<< " b=" << sync_pipe.info.source_bs.bucket << " k=" << key
<< " size=" << size << " mtime=" << mtime << dendl;
ElasticConfigRef _conf) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env),
sync_pipe(_sync_pipe), key(_key),
mtime(_mtime), conf(_conf) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
- ldout(sync_env->cct, 10) << ": remove remote obj: z=" << sc->source_zone
+ ldpp_dout(dpp, 10) << ": remove remote obj: z=" << sc->source_zone
<< " b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " mtime=" << mtime << dendl;
yield {
string path = conf->get_obj_path(sync_pipe.dest_bucket_info, key);
bool valid = es_query.compile(&err);
if (!valid) {
- ldout(s->cct, 10) << "invalid query, failed generating request json" << dendl;
+ ldpp_dout(this, 10) << "invalid query, failed generating request json" << dendl;
op_ret = -EINVAL;
return;
}
if (marker > 0) {
params.push_back(param_pair_t("from", marker_str.c_str()));
}
- ldout(s->cct, 20) << "sending request to elasticsearch, payload=" << string(in.c_str(), in.length()) << dendl;
+ ldpp_dout(this, 20) << "sending request to elasticsearch, payload=" << string(in.c_str(), in.length()) << dendl;
auto& extra_headers = es_module->get_request_headers();
- op_ret = conn->get_resource(resource, &params, &extra_headers,
+ op_ret = conn->get_resource(s, resource, &params, &extra_headers,
out, &in, nullptr, y);
if (op_ret < 0) {
- ldout(s->cct, 0) << "ERROR: failed to fetch resource (r=" << resource << ", ret=" << op_ret << ")" << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to fetch resource (r=" << resource << ", ret=" << op_ret << ")" << dendl;
return;
}
- ldout(s->cct, 20) << "response: " << string(out.c_str(), out.length()) << dendl;
+ ldpp_dout(this, 20) << "response: " << string(out.c_str(), out.length()) << dendl;
JSONParser jparser;
if (!jparser.parse(out.c_str(), out.length())) {
- ldout(s->cct, 0) << "ERROR: failed to parse elasticsearch response" << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to parse elasticsearch response" << dendl;
op_ret = -EINVAL;
return;
}
try {
decode_json_obj(response, &jparser);
} catch (const JSONDecoder::err& e) {
- ldout(s->cct, 0) << "ERROR: failed to decode JSON input: " << e.what() << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to decode JSON input: " << e.what() << dendl;
op_ret = -EINVAL;
return;
}
RGWHandler_REST *handler = new RGWHandler_REST_MDSearch_S3(auth_registry);
- ldout(s->cct, 20) << __func__ << " handler=" << typeid(*handler).name()
+ ldpp_dout(s, 20) << __func__ << " handler=" << typeid(*handler).name()
<< dendl;
return handler;
}
public:
RGWLogStatRemoteObjCBCR(RGWDataSyncCtx *_sc,
rgw_bucket& _src_bucket, rgw_obj_key& _key) : RGWStatRemoteObjCBCR(_sc, _src_bucket, _key) {}
- int operate() override {
- ldout(sync_env->cct, 0) << "SYNC_LOG: stat of remote obj: z=" << sc->source_zone
+ int operate(const DoutPrefixProvider *dpp) override {
+ ldpp_dout(dpp, 0) << "SYNC_LOG: stat of remote obj: z=" << sc->source_zone
<< " b=" << src_bucket << " k=" << key << " size=" << size << " mtime=" << mtime
<< " attrs=" << attrs << dendl;
return set_cr_done();
return true;
}
- int operate_wrapper() override {
+ int operate_wrapper(const DoutPrefixProvider *dpp) override {
reenter(&wrapper_state) {
while (!is_done()) {
- ldout(cct, 20) << __func__ << "(): operate_wrapper() -> operate()" << dendl;
- operate_ret = operate();
+ ldpp_dout(dpp, 20) << __func__ << "(): operate_wrapper() -> operate()" << dendl;
+ operate_ret = operate(dpp);
if (operate_ret < 0) {
- ldout(cct, 20) << *this << ": operate() returned r=" << operate_ret << dendl;
+ ldpp_dout(dpp, 20) << *this << ": operate() returned r=" << operate_ret << dendl;
}
if (!is_done()) {
yield;
}
}
- ldout(cct, 20) << __func__ << "(): RGWSingletonCR: operate_wrapper() done, need to wake up " << waiters.size() << " waiters" << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): RGWSingletonCR: operate_wrapper() done, need to wake up " << waiters.size() << " waiters" << dendl;
/* we're done, can't yield anymore */
WaiterInfoRef waiter;
retention_days = conf->events_retention_days;
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
rule.init_simple_days_rule("Pubsub Expiration", "" /* all objects in bucket */, retention_days);
try {
old_config.decode(iter);
} catch (const buffer::error& e) {
- ldpp_dout(sync_env->dpp, 0) << __func__ << "(): decode life cycle config failed" << dendl;
+ ldpp_dout(dpp, 0) << __func__ << "(): decode life cycle config failed" << dendl;
}
}
if (old_rule.get_prefix().empty() &&
old_rule.get_expiration().get_days() == retention_days &&
old_rule.is_enabled()) {
- ldpp_dout(sync_env->dpp, 20) << "no need to set lifecycle rule on bucket, existing rule matches config" << dendl;
+ ldpp_dout(dpp, 20) << "no need to set lifecycle rule on bucket, existing rule matches config" << dendl;
return set_cr_done();
}
}
yield call(new RGWBucketLifecycleConfigCR(sync_env->async_rados,
sync_env->store,
lc_config,
- sync_env->dpp));
+ dpp));
if (retcode < 0) {
- ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to set lifecycle on bucket: ret=" << retcode << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to set lifecycle on bucket: ret=" << retcode << dendl;
return set_cr_error(retcode);
}
sub_conf(sub->sub_conf) {
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
get_bucket_info.tenant = conf->user.tenant;
get_bucket_info.bucket_name = sub_conf->data_bucket_name;
sync_env->store,
get_bucket_info,
sub->get_bucket_info_result,
- sync_env->dpp));
+ dpp));
if (retcode < 0 && retcode != -ENOENT) {
- ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to geting bucket info: " << "tenant="
+ ldpp_dout(dpp, 1) << "ERROR: failed to geting bucket info: " << "tenant="
<< get_bucket_info.tenant << " name=" << get_bucket_info.bucket_name << ": ret=" << retcode << dendl;
}
if (retcode == 0) {
int ret = sub->data_access->get_bucket(result->bucket->get_info(), result->bucket->get_attrs(), &sub->bucket);
if (ret < 0) {
- ldpp_dout(sync_env->dpp, 1) << "ERROR: data_access.get_bucket() bucket=" << result->bucket << " failed, ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: data_access.get_bucket() bucket=" << result->bucket << " failed, ret=" << ret << dendl;
return set_cr_error(ret);
}
}
yield call(new InitBucketLifecycleCR(sc, conf,
sub->get_bucket_info_result->bucket.get()));
if (retcode < 0) {
- ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to init lifecycle on bucket (bucket=" << sub_conf->data_bucket_name << ") ret=" << retcode << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to init lifecycle on bucket (bucket=" << sub_conf->data_bucket_name << ") ret=" << retcode << dendl;
return set_cr_error(retcode);
}
create_bucket.user_info = sub->env->data_user_info;
create_bucket.bucket_name = sub_conf->data_bucket_name;
- ldpp_dout(sync_env->dpp, 20) << "pubsub: bucket create: using user info: " << json_str("obj", *sub->env->data_user_info, true) << dendl;
+ ldpp_dout(dpp, 20) << "pubsub: bucket create: using user info: " << json_str("obj", *sub->env->data_user_info, true) << dendl;
yield call(new RGWBucketCreateLocalCR(sync_env->async_rados,
sync_env->store,
create_bucket,
- sync_env->dpp));
+ dpp));
if (retcode < 0) {
- ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to create bucket: " << "tenant="
+ ldpp_dout(dpp, 1) << "ERROR: failed to create bucket: " << "tenant="
<< get_bucket_info.tenant << " name=" << get_bucket_info.bucket_name << ": ret=" << retcode << dendl;
return set_cr_error(retcode);
}
}
/* failed twice on -ENOENT, unexpected */
- ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to create bucket " << "tenant=" << get_bucket_info.tenant
+ ldpp_dout(dpp, 1) << "ERROR: failed to create bucket " << "tenant=" << get_bucket_info.tenant
<< " name=" << get_bucket_info.bucket_name << dendl;
return set_cr_error(-EIO);
}
oid_prefix(sub->sub_conf->data_oid_prefix) {
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
rgw_object_simple_put_params put_obj;
reenter(this) {
yield call(new RGWObjectSimplePutCR(sync_env->async_rados,
sync_env->store,
put_obj,
- sync_env->dpp));
+ dpp));
if (retcode < 0) {
- ldpp_dout(sync_env->dpp, 10) << "failed to store event: " << put_obj.bucket << "/" << put_obj.key << " ret=" << retcode << dendl;
+ ldpp_dout(dpp, 10) << "failed to store event: " << put_obj.bucket << "/" << put_obj.key << " ret=" << retcode << dendl;
return set_cr_error(retcode);
} else {
- ldpp_dout(sync_env->dpp, 20) << "event stored: " << put_obj.bucket << "/" << put_obj.key << dendl;
+ ldpp_dout(dpp, 20) << "event stored: " << put_obj.bucket << "/" << put_obj.key << dendl;
}
return set_cr_done();
sub_conf(_sub->sub_conf) {
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
ceph_assert(sub_conf->push_endpoint);
yield call(sub_conf->push_endpoint->send_to_completion_async(*event.get(), sync_env));
if (retcode < 0) {
- ldout(sync_env->cct, 10) << "failed to push event: " << event->id <<
+ ldpp_dout(dpp, 10) << "failed to push event: " << event->id <<
" to endpoint: " << sub_conf->push_endpoint_name << " ret=" << retcode << dendl;
return set_cr_error(retcode);
}
- ldout(sync_env->cct, 20) << "event: " << event->id <<
+ ldpp_dout(dpp, 20) << "event: " << event->id <<
" pushed to endpoint: " << sub_conf->push_endpoint_name << dendl;
return set_cr_done();
}
}
~GetSubCR() { }
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
if (owner.empty()) {
- ldout(sync_env->cct, 1) << "ERROR: missing user info when getting subscription: " << sub_name << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: missing user info when getting subscription: " << sub_name << dendl;
mgr->remove_get_sub(owner, sub_name);
return set_cr_error(-EINVAL);
} else {
rgw_raw_obj obj;
ps.get_sub_meta_obj(sub_name, &obj);
bool empty_on_enoent = false;
- call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj,
+ call(new ReadInfoCR(dpp, sync_env->async_rados, sync_env->store->svc()->sysobj,
obj,
&user_sub_conf, empty_on_enoent));
}
yield (*ref)->call_init_cr(this);
if (retcode < 0) {
- ldout(sync_env->cct, 1) << "ERROR: failed to init subscription when getting subscription: " << sub_name << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to init subscription when getting subscription: " << sub_name << dendl;
mgr->remove_get_sub(owner, sub_name);
return set_cr_error(retcode);
}
PSEnvRef& _env) : RGWCoroutine(_sc->cct),
sc(_sc), sync_env(_sc->env),
env(_env), conf(env->conf) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
- ldpp_dout(sync_env->dpp, 1) << ": init pubsub config zone=" << sc->source_zone << dendl;
+ ldpp_dout(dpp, 1) << ": init pubsub config zone=" << sc->source_zone << dendl;
/* nothing to do here right now */
create_user.user = conf->user;
create_user.max_buckets = 0; /* unlimited */
create_user.display_name = "pubsub";
create_user.generate_key = false;
- yield call(new RGWUserCreateCR(sync_env->async_rados, sync_env->store, create_user, sync_env->dpp));
+ yield call(new RGWUserCreateCR(sync_env->async_rados, sync_env->store, create_user, dpp));
if (retcode < 0 && retcode != -ERR_USER_EXIST) {
- ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to create rgw user: ret=" << retcode << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to create rgw user: ret=" << retcode << dendl;
return set_cr_error(retcode);
}
get_user_info.user = conf->user;
- yield call(new RGWGetUserInfoCR(sync_env->async_rados, sync_env->store, get_user_info, env->data_user_info, sync_env->dpp));
+ yield call(new RGWGetUserInfoCR(sync_env->async_rados, sync_env->store, get_user_info, env->data_user_info, dpp));
if (retcode < 0) {
- ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to create rgw user: ret=" << retcode << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to create rgw user: ret=" << retcode << dendl;
return set_cr_error(retcode);
}
- ldpp_dout(sync_env->dpp, 20) << "pubsub: get user info cr returned: " << json_str("obj", *env->data_user_info, true) << dendl;
+ ldpp_dout(dpp, 20) << "pubsub: get user info cr returned: " << json_str("obj", *env->data_user_info, true) << dendl;
return set_cr_done();
topics(_topics) {
*topics = std::make_shared<vector<PSTopicConfigRef> >();
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
ps.get_bucket_meta_obj(bucket, &bucket_obj);
ps.get_meta_obj(&user_obj);
using ReadInfoCR = RGWSimpleRadosReadCR<rgw_pubsub_bucket_topics>;
yield {
bool empty_on_enoent = true;
- call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj,
+ call(new ReadInfoCR(dpp, sync_env->async_rados, sync_env->store->svc()->sysobj,
bucket_obj,
&bucket_topics, empty_on_enoent));
}
return set_cr_error(retcode);
}
- ldout(sync_env->cct, 20) << "RGWPSFindBucketTopicsCR(): found " << bucket_topics.topics.size() << " topics for bucket " << bucket << dendl;
+ ldpp_dout(dpp, 20) << "RGWPSFindBucketTopicsCR(): found " << bucket_topics.topics.size() << " topics for bucket " << bucket << dendl;
if (!bucket_topics.topics.empty()) {
using ReadUserTopicsInfoCR = RGWSimpleRadosReadCR<rgw_pubsub_topics>;
yield {
bool empty_on_enoent = true;
- call(new ReadUserTopicsInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj,
+ call(new ReadUserTopicsInfoCR(dpp, sync_env->async_rados, sync_env->store->svc()->sysobj,
user_obj,
&user_topics, empty_on_enoent));
}
has_subscriptions(false),
event_handled(false) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
- ldout(sc->cct, 20) << ": handle event: obj: z=" << sc->source_zone
+ ldpp_dout(dpp, 20) << ": handle event: obj: z=" << sc->source_zone
<< " event=" << json_str("event", *event, false)
<< " owner=" << owner << dendl;
- ldout(sc->cct, 20) << "pubsub: " << topics->size() << " topics found for path" << dendl;
+ ldpp_dout(dpp, 20) << "pubsub: " << topics->size() << " topics found for path" << dendl;
// outside caller should check that
ceph_assert(!topics->empty());
// loop over all topics related to the bucket/object
for (titer = topics->begin(); titer != topics->end(); ++titer) {
- ldout(sc->cct, 20) << ": notification for " << event->source << ": topic=" <<
+ ldpp_dout(dpp, 20) << ": notification for " << event->source << ": topic=" <<
(*titer)->name << ", has " << (*titer)->subs.size() << " subscriptions" << dendl;
// loop over all subscriptions of the topic
for (siter = (*titer)->subs.begin(); siter != (*titer)->subs.end(); ++siter) {
- ldout(sc->cct, 20) << ": subscription: " << *siter << dendl;
+ ldpp_dout(dpp, 20) << ": subscription: " << *siter << dendl;
has_subscriptions = true;
// try to read subscription configuration
yield PSManager::call_get_subscription_cr(sc, env->manager, this, owner, *siter, &sub);
if (retcode < 0) {
if (perfcounter) perfcounter->inc(l_rgw_pubsub_missing_conf);
- ldout(sc->cct, 1) << "ERROR: failed to find subscription config for subscription=" << *siter
+ ldpp_dout(dpp, 1) << "ERROR: failed to find subscription config for subscription=" << *siter
<< " ret=" << retcode << dendl;
if (retcode == -ENOENT) {
// missing subscription info should be reflected back as invalid argument
}
if (sub->sub_conf->s3_id.empty()) {
// subscription was not made by S3 compatible API
- ldout(sc->cct, 20) << "storing event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl;
+ ldpp_dout(dpp, 20) << "storing event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl;
yield call(PSSubscription::store_event_cr(sc, sub, event));
if (retcode < 0) {
if (perfcounter) perfcounter->inc(l_rgw_pubsub_store_fail);
- ldout(sc->cct, 1) << "ERROR: failed to store event for subscription=" << *siter << " ret=" << retcode << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to store event for subscription=" << *siter << " ret=" << retcode << dendl;
} else {
if (perfcounter) perfcounter->inc(l_rgw_pubsub_store_ok);
event_handled = true;
}
if (sub->sub_conf->push_endpoint) {
- ldout(sc->cct, 20) << "push event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl;
+ ldpp_dout(dpp, 20) << "push event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl;
yield call(PSSubscription::push_event_cr(sc, sub, event));
if (retcode < 0) {
if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_failed);
- ldout(sc->cct, 1) << "ERROR: failed to push event for subscription=" << *siter << " ret=" << retcode << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to push event for subscription=" << *siter << " ret=" << retcode << dendl;
} else {
if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_ok);
event_handled = true;
}
} else {
// subscription was made by S3 compatible API
- ldout(sc->cct, 20) << "storing s3 event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl;
+ ldpp_dout(dpp, 20) << "storing s3 event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl;
s3_event->configurationId = sub->sub_conf->s3_id;
s3_event->opaque_data = (*titer)->opaque_data;
yield call(PSSubscription::store_event_cr(sc, sub, s3_event));
if (retcode < 0) {
if (perfcounter) perfcounter->inc(l_rgw_pubsub_store_fail);
- ldout(sc->cct, 1) << "ERROR: failed to store s3 event for subscription=" << *siter << " ret=" << retcode << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to store s3 event for subscription=" << *siter << " ret=" << retcode << dendl;
} else {
if (perfcounter) perfcounter->inc(l_rgw_pubsub_store_ok);
event_handled = true;
}
if (sub->sub_conf->push_endpoint) {
- ldout(sc->cct, 20) << "push s3 event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl;
+ ldpp_dout(dpp, 20) << "push s3 event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl;
yield call(PSSubscription::push_event_cr(sc, sub, s3_event));
if (retcode < 0) {
if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_failed);
- ldout(sc->cct, 1) << "ERROR: failed to push s3 event for subscription=" << *siter << " ret=" << retcode << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to push s3 event for subscription=" << *siter << " ret=" << retcode << dendl;
} else {
if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_ok);
event_handled = true;
versioned_epoch(_versioned_epoch),
topics(_topics) {
}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
- ldout(sc->cct, 20) << ": stat of remote obj: z=" << sc->source_zone
+ ldpp_dout(dpp, 20) << ": stat of remote obj: z=" << sc->source_zone
<< " b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " size=" << size << " mtime=" << mtime
<< " attrs=" << attrs << dendl;
{
~RGWPSHandleObjCreateCR() override {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield call(new RGWPSFindBucketTopicsCR(sc, env, sync_pipe.dest_bucket_info.owner,
sync_pipe.info.source_bs.bucket, key,
rgw::notify::ObjectCreated,
&topics));
if (retcode < 0) {
- ldout(sc->cct, 1) << "ERROR: RGWPSFindBucketTopicsCR returned ret=" << retcode << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: RGWPSFindBucketTopicsCR returned ret=" << retcode << dendl;
return set_cr_error(retcode);
}
if (topics->empty()) {
- ldout(sc->cct, 20) << "no topics found for " << sync_pipe.info.source_bs.bucket << "/" << key << dendl;
+ ldpp_dout(dpp, 20) << "no topics found for " << sync_pipe.info.source_bs.bucket << "/" << key << dendl;
return set_cr_done();
}
yield call(new RGWPSHandleRemoteObjCR(sc, sync_pipe, key, env, versioned_epoch, topics));
bucket(_sync_pipe.dest_bucket_info.bucket),
key(_key),
mtime(_mtime), event_type(_event_type) {}
- int operate() override {
+ int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
- ldout(sc->cct, 20) << ": remove remote obj: z=" << sc->source_zone
+ ldpp_dout(dpp, 20) << ": remove remote obj: z=" << sc->source_zone
<< " b=" << bucket << " k=" << key << " mtime=" << mtime << dendl;
yield call(new RGWPSFindBucketTopicsCR(sc, env, owner, bucket, key, event_type, &topics));
if (retcode < 0) {
- ldout(sc->cct, 1) << "ERROR: RGWPSFindBucketTopicsCR returned ret=" << retcode << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: RGWPSFindBucketTopicsCR returned ret=" << retcode << dendl;
return set_cr_error(retcode);
}
if (topics->empty()) {
- ldout(sc->cct, 20) << "no topics found for " << bucket << "/" << key << dendl;
+ ldpp_dout(dpp, 20) << "no topics found for " << bucket << "/" << key << dendl;
return set_cr_done();
}
// at this point we don't know whether we need the ceph event or S3 event
bool exists;
topic_name = s->info.args.get("topic", &exists);
if (!exists) {
- ldout(s->cct, 1) << "missing required param 'topic'" << dendl;
+ ldpp_dout(this, 1) << "missing required param 'topic'" << dendl;
return -EINVAL;
}
event_id = s->info.args.get("event-id", &exists);
if (!exists) {
- ldout(s->cct, 1) << "missing required param 'event-id'" << dendl;
+ ldpp_dout(this, 1) << "missing required param 'event-id'" << dendl;
return -EINVAL;
}
return 0;
const int ret = s->info.args.get_int("max-entries", &max_entries,
RGWPubSub::Sub::DEFAULT_MAX_EVENTS);
if (ret < 0) {
- ldout(s->cct, 1) << "failed to parse 'max-entries' param" << dendl;
+ ldpp_dout(this, 1) << "failed to parse 'max-entries' param" << dendl;
return -EINVAL;
}
return 0;
bool exists;
topic_name = s->info.args.get("topic", &exists);
if (!exists) {
- ldout(s->cct, 1) << "missing required param 'topic'" << dendl;
+ ldpp_dout(this, 1) << "missing required param 'topic'" << dendl;
return -EINVAL;
}
}
rgw::notify::from_string_list(events_str, events);
if (std::find(events.begin(), events.end(), rgw::notify::UnknownEvent) != events.end()) {
- ldout(s->cct, 1) << "invalid event type in list: " << events_str << dendl;
+ ldpp_dout(this, 1) << "invalid event type in list: " << events_str << dendl;
return -EINVAL;
}
return notif_bucket_path(s->object->get_name(), bucket_name);
ps.emplace(static_cast<rgw::sal::RadosStore*>(store), s->owner.get_id().tenant);
auto b = ps->get_bucket(bucket_info.bucket);
- op_ret = b->create_notification(topic_name, events, y);
+ op_ret = b->create_notification(this, topic_name, events, y);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to create notification for topic '" << topic_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to create notification for topic '" << topic_name << "', ret=" << op_ret << dendl;
return;
}
- ldout(s->cct, 20) << "successfully created notification for topic '" << topic_name << "'" << dendl;
+ ldpp_dout(this, 20) << "successfully created notification for topic '" << topic_name << "'" << dendl;
}
// command: DELETE /notifications/bucket/<bucket>?topic=<topic-name>
bool exists;
topic_name = s->info.args.get("topic", &exists);
if (!exists) {
- ldout(s->cct, 1) << "missing required param 'topic'" << dendl;
+ ldpp_dout(this, 1) << "missing required param 'topic'" << dendl;
return -EINVAL;
}
return notif_bucket_path(s->object->get_name(), bucket_name);
ps.emplace(static_cast<rgw::sal::RadosStore*>(store), s->owner.get_id().tenant);
auto b = ps->get_bucket(bucket_info.bucket);
- op_ret = b->remove_notification(topic_name, y);
+ op_ret = b->remove_notification(this, topic_name, y);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to remove notification from topic '" << topic_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to remove notification from topic '" << topic_name << "', ret=" << op_ret << dendl;
return;
}
- ldout(s->cct, 20) << "successfully removed notification from topic '" << topic_name << "'" << dendl;
+ ldpp_dout(this, 20) << "successfully removed notification from topic '" << topic_name << "'" << dendl;
}
// command: GET /notifications/bucket/<bucket>
auto b = ps->get_bucket(bucket_info.bucket);
op_ret = b->get_topics(&result);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to get topics, ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to get topics, ret=" << op_ret << dendl;
return;
}
}
}
}
- ldout(s->cct, 20) << __func__ << " handler=" << (handler ? typeid(*handler).name() : "<null>") << dendl;
+ ldpp_dout(s, 20) << __func__ << " handler=" << (handler ? typeid(*handler).name() : "<null>") << dendl;
return handler;
}
#define dout_context g_ceph_context
-#define dout_subsys ceph_subsys_rgw_sync
RGWSyncTraceNode::RGWSyncTraceNode(CephContext *_cct, uint64_t _handle,
const RGWSyncTraceNodeRef& _parent,
RGWSyncTraceServiceMapThread(RGWRados *_store, RGWSyncTraceManager *_manager)
: RGWRadosThread(_store, "sync-trace"), store(_store), manager(_manager) {}
- int process() override;
+ int process(const DoutPrefixProvider *dpp) override;
};
-int RGWSyncTraceServiceMapThread::process()
+int RGWSyncTraceServiceMapThread::process(const DoutPrefixProvider *dpp)
{
map<string, string> status;
status["current_sync"] = manager->get_active_names();
static std::map<std::string, std::string>* ext_mime_map;
-int rgw_init_ioctx(librados::Rados *rados, const rgw_pool& pool,
+int rgw_init_ioctx(const DoutPrefixProvider *dpp,
+ librados::Rados *rados, const rgw_pool& pool,
librados::IoCtx& ioctx, bool create,
bool mostly_omap)
{
if (r == -ENOENT && create) {
r = rados->pool_create(pool.name.c_str());
if (r == -ERANGE) {
- dout(0)
+ ldpp_dout(dpp, 0)
<< __func__
<< " ERROR: librados::Rados::pool_create returned " << cpp_strerror(-r)
<< " (this can be due to a pool or placement group misconfiguration, e.g."
stringify(bias) + "\"}",
inbl, NULL, NULL);
if (r < 0) {
- dout(10) << __func__ << " warning: failed to set pg_autoscale_bias on "
+ ldpp_dout(dpp, 10) << __func__ << " warning: failed to set pg_autoscale_bias on "
<< pool.name << dendl;
}
// set pg_num_min
stringify(min) + "\"}",
inbl, NULL, NULL);
if (r < 0) {
- dout(10) << __func__ << " warning: failed to set pg_num_min on "
+ ldpp_dout(dpp, 10) << __func__ << " warning: failed to set pg_num_min on "
<< pool.name << dendl;
}
// set recovery_priority
stringify(p) + "\"}",
inbl, NULL, NULL);
if (r < 0) {
- dout(10) << __func__ << " warning: failed to set recovery_priority on "
+ ldpp_dout(dpp, 10) << __func__ << " warning: failed to set recovery_priority on "
<< pool.name << dendl;
}
}
return 0;
}
-int rgw_put_system_obj(RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const string& oid, bufferlist& data, bool exclusive,
+int rgw_put_system_obj(const DoutPrefixProvider *dpp,
+ RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const string& oid, bufferlist& data, bool exclusive,
RGWObjVersionTracker *objv_tracker, real_time set_mtime, optional_yield y, map<string, bufferlist> *pattrs)
{
map<string,bufferlist> no_attrs;
.set_exclusive(exclusive)
.set_mtime(set_mtime)
.set_attrs(*pattrs)
- .write(data, y);
+ .write(dpp, data, y);
return ret;
}
ret = rop.set_cache_info(cache_info)
.set_refresh_version(refresh_version)
- .read(&bl, y);
+ .read(dpp, &bl, y);
if (ret == -ECANCELED) {
/* raced, restart */
if (!original_readv.empty()) {
return 0;
}
-int rgw_delete_system_obj(RGWSI_SysObj *sysobj_svc, const rgw_pool& pool, const string& oid,
+int rgw_delete_system_obj(const DoutPrefixProvider *dpp,
+ RGWSI_SysObj *sysobj_svc, const rgw_pool& pool, const string& oid,
RGWObjVersionTracker *objv_tracker, optional_yield y)
{
auto obj_ctx = sysobj_svc->init_obj_ctx();
rgw_raw_obj obj(pool, oid);
return sysobj.wop()
.set_objv_tracker(objv_tracker)
- .remove(y);
+ .remove(dpp, y);
}
thread_local bool is_asio_thread = false;
-int rgw_rados_operate(librados::IoCtx& ioctx, const std::string& oid,
+int rgw_rados_operate(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid,
librados::ObjectReadOperation *op, bufferlist* pbl,
optional_yield y, int flags)
{
}
// work on asio threads should be asynchronous, so warn when they block
if (is_asio_thread) {
- dout(20) << "WARNING: blocking librados call" << dendl;
+ ldpp_dout(dpp, 20) << "WARNING: blocking librados call" << dendl;
}
return ioctx.operate(oid, op, nullptr, flags);
}
-int rgw_rados_operate(librados::IoCtx& ioctx, const std::string& oid,
+int rgw_rados_operate(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid,
librados::ObjectWriteOperation *op, optional_yield y,
int flags)
{
return -ec.value();
}
if (is_asio_thread) {
- dout(20) << "WARNING: blocking librados call" << dendl;
+ ldpp_dout(dpp, 20) << "WARNING: blocking librados call" << dendl;
}
return ioctx.operate(oid, op, flags);
}
-int rgw_rados_notify(librados::IoCtx& ioctx, const std::string& oid,
+int rgw_rados_notify(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid,
bufferlist& bl, uint64_t timeout_ms, bufferlist* pbl,
optional_yield y)
{
return -ec.value();
}
if (is_asio_thread) {
- dout(20) << "WARNING: blocking librados call" << dendl;
+ ldpp_dout(dpp, 20) << "WARNING: blocking librados call" << dendl;
}
return ioctx.notify2(oid, bl, timeout_ms, pbl);
}
struct obj_version;
-int rgw_init_ioctx(librados::Rados *rados, const rgw_pool& pool,
+int rgw_init_ioctx(const DoutPrefixProvider *dpp,
+ librados::Rados *rados, const rgw_pool& pool,
librados::IoCtx& ioctx,
bool create = false,
bool mostly_omap = false);
int rgw_parse_list_of_flags(struct rgw_name_to_flag *mapping,
const string& str, uint32_t *perm);
-int rgw_put_system_obj(RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const string& oid, bufferlist& data, bool exclusive,
+int rgw_put_system_obj(const DoutPrefixProvider *dpp, RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const string& oid, bufferlist& data, bool exclusive,
RGWObjVersionTracker *objv_tracker, real_time set_mtime, optional_yield y, map<string, bufferlist> *pattrs = NULL);
int rgw_get_system_obj(RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const string& key, bufferlist& bl,
RGWObjVersionTracker *objv_tracker, real_time *pmtime, optional_yield y, const DoutPrefixProvider *dpp, map<string, bufferlist> *pattrs = NULL,
rgw_cache_entry_info *cache_info = NULL,
boost::optional<obj_version> refresh_version = boost::none);
-int rgw_delete_system_obj(RGWSI_SysObj *sysobj_svc, const rgw_pool& pool, const string& oid,
+int rgw_delete_system_obj(const DoutPrefixProvider *dpp,
+ RGWSI_SysObj *sysobj_svc, const rgw_pool& pool, const string& oid,
RGWObjVersionTracker *objv_tracker, optional_yield y);
const char *rgw_find_mime_by_ext(string& ext);
extern thread_local bool is_asio_thread;
/// perform the rados operation, using the yield context when given
-int rgw_rados_operate(librados::IoCtx& ioctx, const std::string& oid,
+int rgw_rados_operate(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid,
librados::ObjectReadOperation *op, bufferlist* pbl,
optional_yield y, int flags = 0);
-int rgw_rados_operate(librados::IoCtx& ioctx, const std::string& oid,
+int rgw_rados_operate(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid,
librados::ObjectWriteOperation *op, optional_yield y,
int flags = 0);
-int rgw_rados_notify(librados::IoCtx& ioctx, const std::string& oid,
+int rgw_rados_notify(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid,
bufferlist& bl, uint64_t timeout_ms, bufferlist* pbl,
optional_yield y);
string oid, key;
get_obj_bucket_and_oid_loc(obj, oid, key);
- ldout(s->cct, 20) << "NOTICE: head obj oid= " << oid << dendl;
+ ldpp_dout(s, 20) << "NOTICE: head obj oid= " << oid << dendl;
const set<string> obj_key{RGW_OBJ_TORRENT};
map<string, bufferlist> m;
- const int r = object->omap_get_vals_by_keys(oid, obj_key, &m);
+ const int r = object->omap_get_vals_by_keys(s, oid, obj_key, &m);
if (r < 0) {
- ldout(s->cct, 0) << "ERROR: omap_get_vals_by_keys failed: " << r << dendl;
+ ldpp_dout(s, 0) << "ERROR: omap_get_vals_by_keys failed: " << r << dendl;
return r;
}
if (m.size() != 1) {
- ldout(s->cct, 0) << "ERROR: omap key " RGW_OBJ_TORRENT " not found" << dendl;
+ ldpp_dout(s, 0) << "ERROR: omap key " RGW_OBJ_TORRENT " not found" << dendl;
return -EINVAL;
}
bl.append(std::move(m.begin()->second));
ret = save_torrent_file(y);
if (0 != ret)
{
- ldout(s->cct, 0) << "ERROR: failed to save_torrent_file() ret= "<< ret << dendl;
+ ldpp_dout(s, 0) << "ERROR: failed to save_torrent_file() ret= "<< ret << dendl;
return ret;
}
if (announce_list.empty())
{
- ldout(s->cct, 5) << "NOTICE: announce_list is empty " << dendl;
+ ldpp_dout(s, 5) << "NOTICE: announce_list is empty " << dendl;
return;
}
int op_ret = 0;
string key = RGW_OBJ_TORRENT;
- op_ret = s->object->omap_set_val_by_key(key, bl, false, y);
+ op_ret = s->object->omap_set_val_by_key(s, key, bl, false, y);
if (op_ret < 0)
{
- ldout(s->cct, 0) << "ERROR: failed to omap_set() op_ret = " << op_ret << dendl;
+ ldpp_dout(s, 0) << "ERROR: failed to omap_set() op_ret = " << op_ret << dendl;
return op_ret;
}
stop();
}
- int start() {
- int r = store->getRados()->get_raw_obj_ref(obj, &ref);
+ int start(const DoutPrefixProvider *dpp) {
+ int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
return r;
}
}
}
if (r < 0) {
- lderr(store->ctx()) << "Failed to watch " << ref.obj
+ ldpp_dout(dpp, -1) << "Failed to watch " << ref.obj
<< " with " << cpp_strerror(-r) << dendl;
ref.pool.ioctx().close();
return r;
}
- ldout(store->ctx(), 10) << "Watching " << ref.obj.oid << dendl;
+ ldpp_dout(dpp, 10) << "Watching " << ref.obj.oid << dendl;
return 0;
}
/// concurrent requests
class BucketTrimShardCollectCR : public RGWShardCollectCR {
static constexpr int MAX_CONCURRENT_SHARDS = 16;
+ const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* const store;
const RGWBucketInfo& bucket_info;
const std::vector<std::string>& markers; //< shard markers to trim
size_t i{0}; //< index of current shard marker
public:
- BucketTrimShardCollectCR(rgw::sal::RadosStore* store, const RGWBucketInfo& bucket_info,
+ BucketTrimShardCollectCR(const DoutPrefixProvider *dpp,
+ rgw::sal::RadosStore* store, const RGWBucketInfo& bucket_info,
const std::vector<std::string>& markers)
: RGWShardCollectCR(store->ctx(), MAX_CONCURRENT_SHARDS),
- store(store), bucket_info(bucket_info), markers(markers)
+ dpp(dpp), store(store), bucket_info(bucket_info), markers(markers)
{}
bool spawn_next() override;
};
// skip empty markers
if (!marker.empty()) {
- ldout(cct, 10) << "trimming bilog shard " << shard_id
+ ldpp_dout(dpp, 10) << "trimming bilog shard " << shard_id
<< " of " << bucket_info.bucket << " at marker " << marker << dendl;
- spawn(new RGWRadosBILogTrimCR(store, bucket_info, shard_id,
+ spawn(new RGWRadosBILogTrimCR(dpp, store, bucket_info, shard_id,
std::string{}, marker),
false);
return true;
source_policy = make_shared<rgw_bucket_get_sync_policy_result>();
}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int BucketTrimInstanceCR::operate()
+int BucketTrimInstanceCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
- ldout(cct, 4) << "starting trim on bucket=" << bucket_instance << dendl;
+ ldpp_dout(dpp, 4) << "starting trim on bucket=" << bucket_instance << dendl;
get_policy_params.zone = zone_id;
get_policy_params.bucket = bucket;
auto ziter = zone_conn_map.find(zid);
if (ziter == zone_conn_map.end()) {
- ldout(cct, 0) << "WARNING: no connection to zone " << zid << ", can't trim bucket: " << bucket << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: no connection to zone " << zid << ", can't trim bucket: " << bucket << dendl;
return set_cr_error(-ECANCELED);
}
using StatusCR = RGWReadRESTResourceCR<StatusShards>;
retcode = take_min_status(cct, peer_status.begin(), peer_status.end(),
&min_markers);
if (retcode < 0) {
- ldout(cct, 4) << "failed to correlate bucket sync status from peers" << dendl;
+ ldpp_dout(dpp, 4) << "failed to correlate bucket sync status from peers" << dendl;
return set_cr_error(retcode);
}
// trim shards with a ShardCollectCR
- ldout(cct, 10) << "trimming bilogs for bucket=" << pbucket_info->bucket
+ ldpp_dout(dpp, 10) << "trimming bilogs for bucket=" << pbucket_info->bucket
<< " markers=" << min_markers << ", shards=" << min_markers.size() << dendl;
set_status("trimming bilog shards");
- yield call(new BucketTrimShardCollectCR(store, *pbucket_info, min_markers));
+ yield call(new BucketTrimShardCollectCR(dpp, store, *pbucket_info, min_markers));
// ENODATA just means there were no keys to trim
if (retcode == -ENODATA) {
retcode = 0;
}
if (retcode < 0) {
- ldout(cct, 4) << "failed to trim bilog shards: "
+ ldpp_dout(dpp, 4) << "failed to trim bilog shards: "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
const std::string start_marker;
MetadataListCallback callback;
- int _send_request() override;
+ int _send_request(const DoutPrefixProvider *dpp) override;
public:
AsyncMetadataList(CephContext *cct, RGWCoroutine *caller,
RGWAioCompletionNotifier *cn, RGWMetadataManager *mgr,
{}
};
-int AsyncMetadataList::_send_request()
+int AsyncMetadataList::_send_request(const DoutPrefixProvider *dpp)
{
void* handle = nullptr;
std::list<std::string> keys;
std::string marker;
// start a listing at the given marker
- int r = mgr->list_keys_init(section, start_marker, &handle);
+ int r = mgr->list_keys_init(dpp, section, start_marker, &handle);
if (r == -EINVAL) {
// restart with empty marker below
} else if (r < 0) {
- ldout(cct, 10) << "failed to init metadata listing: "
+ ldpp_dout(dpp, 10) << "failed to init metadata listing: "
<< cpp_strerror(r) << dendl;
return r;
} else {
- ldout(cct, 20) << "starting metadata listing at " << start_marker << dendl;
+ ldpp_dout(dpp, 20) << "starting metadata listing at " << start_marker << dendl;
// release the handle when scope exits
auto g = make_scope_guard([=] { mgr->list_keys_complete(handle); });
// get the next key and marker
r = mgr->list_keys_next(handle, 1, keys, &truncated);
if (r < 0) {
- ldout(cct, 10) << "failed to list metadata: "
+ ldpp_dout(dpp, 10) << "failed to list metadata: "
<< cpp_strerror(r) << dendl;
return r;
}
// restart the listing from the beginning (empty marker)
handle = nullptr;
- r = mgr->list_keys_init(section, "", &handle);
+ r = mgr->list_keys_init(dpp, section, "", &handle);
if (r < 0) {
- ldout(cct, 10) << "failed to restart metadata listing: "
+ ldpp_dout(dpp, 10) << "failed to restart metadata listing: "
<< cpp_strerror(r) << dendl;
return r;
}
- ldout(cct, 20) << "restarting metadata listing" << dendl;
+ ldpp_dout(dpp, 20) << "restarting metadata listing" << dendl;
// release the handle when scope exits
auto g = make_scope_guard([=] { mgr->list_keys_complete(handle); });
// get the next key and marker
r = mgr->list_keys_next(handle, 1, keys, &truncated);
if (r < 0) {
- ldout(cct, 10) << "failed to list metadata: "
+ ldpp_dout(dpp, 10) << "failed to list metadata: "
<< cpp_strerror(r) << dendl;
return r;
}
request_cleanup();
}
- int send_request() override {
+ int send_request(const DoutPrefixProvider *dpp) override {
req = new AsyncMetadataList(cct, this, stack->create_completion_notifier(),
mgr, section, start_marker, callback);
async_rados->queue(req);
observer(observer), obj(obj), counter(config.counter_size), dpp(dpp)
{}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
const std::string BucketTrimCR::section{"bucket.instance"};
-int BucketTrimCR::operate()
+int BucketTrimCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
start_time = ceph::mono_clock::now();
if (config.buckets_per_interval) {
// query watch/notify for hot buckets
- ldout(cct, 10) << "fetching active bucket counters" << dendl;
+ ldpp_dout(dpp, 10) << "fetching active bucket counters" << dendl;
set_status("fetching active bucket counters");
yield {
// request the top bucket counters from each peer gateway
¬ify_replies));
}
if (retcode < 0) {
- ldout(cct, 10) << "failed to fetch peer bucket counters" << dendl;
+ ldpp_dout(dpp, 10) << "failed to fetch peer bucket counters" << dendl;
return set_cr_error(retcode);
}
// read BucketTrimStatus for marker position
set_status("reading trim status");
using ReadStatus = RGWSimpleRadosReadCR<BucketTrimStatus>;
- yield call(new ReadStatus(store->svc()->rados->get_async_processor(), store->svc()->sysobj, obj,
+ yield call(new ReadStatus(dpp, store->svc()->rados->get_async_processor(), store->svc()->sysobj, obj,
&status, true, &objv));
if (retcode < 0) {
- ldout(cct, 10) << "failed to read bilog trim status: "
+ ldpp_dout(dpp, 10) << "failed to read bilog trim status: "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
if (status.marker == "MAX") {
status.marker.clear(); // restart at the beginning
}
- ldout(cct, 10) << "listing cold buckets from marker="
+ ldpp_dout(dpp, 10) << "listing cold buckets from marker="
<< status.marker << dendl;
set_status("listing cold buckets for trim");
// trim bucket instances with limited concurrency
set_status("trimming buckets");
- ldout(cct, 4) << "collected " << buckets.size() << " buckets for trim" << dendl;
+ ldpp_dout(dpp, 4) << "collected " << buckets.size() << " buckets for trim" << dendl;
yield call(new BucketTrimInstanceCollectCR(store, http, observer, buckets,
config.concurrent_buckets, dpp));
// ignore errors from individual buckets
status.marker = std::move(last_cold_marker);
ldpp_dout(dpp, 20) << "writing bucket trim marker=" << status.marker << dendl;
using WriteStatus = RGWSimpleRadosWriteCR<BucketTrimStatus>;
- yield call(new WriteStatus(store->svc()->rados->get_async_processor(), store->svc()->sysobj, obj,
+ yield call(new WriteStatus(dpp, store->svc()->rados->get_async_processor(), store->svc()->sysobj, obj,
status, &objv));
if (retcode < 0) {
ldpp_dout(dpp, 4) << "failed to write updated trim status: "
return set_cr_error(retcode);
}
- ldout(cct, 4) << "bucket index log processing completed in "
+ ldpp_dout(dpp, 4) << "bucket index log processing completed in "
<< ceph::mono_clock::now() - start_time << dendl;
return set_cr_done();
}
cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct)),
dpp(dpp) {}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int BucketTrimPollCR::operate()
+int BucketTrimPollCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
for (;;) {
int BucketTrimManager::init()
{
- return impl->watcher.start();
+ return impl->watcher.start(this);
}
void BucketTrimManager::on_bucket_changed(const std::string_view& bucket)
namespace {
class DatalogTrimImplCR : public RGWSimpleCoroutine {
+ const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* store;
boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
int shard;
std::string* last_trim_marker;
public:
- DatalogTrimImplCR(rgw::sal::RadosStore* store, int shard,
+ DatalogTrimImplCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, int shard,
const std::string& marker, std::string* last_trim_marker)
- : RGWSimpleCoroutine(store->ctx()), store(store), shard(shard),
+ : RGWSimpleCoroutine(store->ctx()), dpp(dpp), store(store), shard(shard),
marker(marker), last_trim_marker(last_trim_marker) {
set_description() << "Datalog trim shard=" << shard
<< " marker=" << marker;
}
- int send_request() override {
+ int send_request(const DoutPrefixProvider *dpp) override {
set_status() << "sending request";
cn = stack->create_completion_notifier();
- return store->svc()->datalog_rados->trim_entries(shard, marker,
+ return store->svc()->datalog_rados->trim_entries(dpp, shard, marker,
cn->completion());
}
int request_complete() override {
int r = cn->completion()->get_return_value();
- ldout(cct, 20) << __PRETTY_FUNCTION__ << "(): trim of shard=" << shard
+ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << "(): trim of shard=" << shard
<< " marker=" << marker << " returned r=" << r << dendl;
set_status() << "request complete; ret=" << r;
class DataLogTrimCR : public RGWCoroutine {
using TrimCR = DatalogTrimImplCR;
+ const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* store;
RGWHTTPManager *http;
const int num_shards;
int ret{0};
public:
- DataLogTrimCR(rgw::sal::RadosStore* store, RGWHTTPManager *http,
+ DataLogTrimCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, RGWHTTPManager *http,
int num_shards, std::vector<std::string>& last_trim)
- : RGWCoroutine(store->ctx()), store(store), http(http),
+ : RGWCoroutine(store->ctx()), dpp(dpp), store(store), http(http),
num_shards(num_shards),
zone_id(store->svc()->zone->get_zone().id),
peer_status(store->svc()->zone->get_zone_data_notify_to_map().size()),
last_trim(last_trim)
{}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int DataLogTrimCR::operate()
+int DataLogTrimCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
- ldout(cct, 10) << "fetching sync status for zone " << zone_id << dendl;
+ ldpp_dout(dpp, 10) << "fetching sync status for zone " << zone_id << dendl;
set_status("fetching sync status");
yield {
// query data sync status from each sync peer
auto p = peer_status.begin();
for (auto& c : store->svc()->zone->get_zone_data_notify_to_map()) {
- ldout(cct, 20) << "query sync status from " << c.first << dendl;
+ ldpp_dout(dpp, 20) << "query sync status from " << c.first << dendl;
using StatusCR = RGWReadRESTResourceCR<rgw_data_sync_status>;
spawn(new StatusCR(cct, c.second, http, "/admin/log/", params, &*p),
false);
drain_all();
if (ret < 0) {
- ldout(cct, 4) << "failed to fetch sync status from all peers" << dendl;
+ ldpp_dout(dpp, 4) << "failed to fetch sync status from all peers" << dendl;
return set_cr_error(ret);
}
- ldout(cct, 10) << "trimming log shards" << dendl;
+ ldpp_dout(dpp, 10) << "trimming log shards" << dendl;
set_status("trimming log shards");
yield {
// determine the minimum marker for each shard
if (m <= last_trim[i]) {
continue;
}
- ldout(cct, 10) << "trimming log shard " << i
+ ldpp_dout(dpp, 10) << "trimming log shard " << i
<< " at marker=" << m
<< " last_trim=" << last_trim[i] << dendl;
- spawn(new TrimCR(store, i, m, &last_trim[i]),
+ spawn(new TrimCR(dpp, store, i, m, &last_trim[i]),
true);
}
}
return 0;
}
-RGWCoroutine* create_admin_data_log_trim_cr(rgw::sal::RadosStore* store,
+RGWCoroutine* create_admin_data_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store,
RGWHTTPManager *http,
int num_shards,
std::vector<std::string>& markers)
{
- return new DataLogTrimCR(store, http, num_shards, markers);
+ return new DataLogTrimCR(dpp, store, http, num_shards, markers);
}
class DataLogTrimPollCR : public RGWCoroutine {
+ const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* store;
RGWHTTPManager *http;
const int num_shards;
std::vector<std::string> last_trim; //< last trimmed marker per shard
public:
- DataLogTrimPollCR(rgw::sal::RadosStore* store, RGWHTTPManager *http,
+ DataLogTrimPollCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, RGWHTTPManager *http,
int num_shards, utime_t interval)
- : RGWCoroutine(store->ctx()), store(store), http(http),
+ : RGWCoroutine(store->ctx()), dpp(dpp), store(store), http(http),
num_shards(num_shards), interval(interval),
lock_oid(store->svc()->datalog_rados->get_oid(0, 0)),
lock_cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct)),
last_trim(num_shards)
{}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int DataLogTrimPollCR::operate()
+int DataLogTrimPollCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
for (;;) {
interval.sec()));
if (retcode < 0) {
// if the lock is already held, go back to sleep and try again later
- ldout(cct, 4) << "failed to lock " << lock_oid << ", trying again in "
+ ldpp_dout(dpp, 4) << "failed to lock " << lock_oid << ", trying again in "
<< interval.sec() << "s" << dendl;
continue;
}
set_status("trimming");
- yield call(new DataLogTrimCR(store, http, num_shards, last_trim));
+ yield call(new DataLogTrimCR(dpp, store, http, num_shards, last_trim));
// note that the lock is not released. this is intentional, as it avoids
// duplicating this work in other gateways
return 0;
}
-RGWCoroutine* create_data_log_trim_cr(rgw::sal::RadosStore* store,
+RGWCoroutine* create_data_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store,
RGWHTTPManager *http,
int num_shards, utime_t interval)
{
- return new DataLogTrimPollCR(store, http, num_shards, interval);
+ return new DataLogTrimPollCR(dpp, store, http, num_shards, interval);
}
#include <string>
#include <vector>
+#include "common/dout.h"
+
class RGWCoroutine;
class RGWRados;
class RGWHTTPManager;
} }
// DataLogTrimCR factory function
-extern RGWCoroutine* create_data_log_trim_cr(rgw::sal::RadosStore* store,
+extern RGWCoroutine* create_data_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store,
RGWHTTPManager *http,
int num_shards, utime_t interval);
// factory function for datalog trim via radosgw-admin
-RGWCoroutine* create_admin_data_log_trim_cr(rgw::sal::RadosStore* store,
+RGWCoroutine* create_admin_data_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store,
RGWHTTPManager *http,
int num_shards,
std::vector<std::string>& markers);
RGWSI_Zone *zone;
RGWSI_MDLog *mdlog;
} svc;
+ const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* const store;
RGWMetadataManager *const metadata;
RGWObjVersionTracker objv;
epoch_t *last_trim_epoch; //< update last trim on success
public:
- PurgePeriodLogsCR(rgw::sal::RadosStore* store, epoch_t realm_epoch, epoch_t *last_trim)
- : RGWCoroutine(store->ctx()), store(store), metadata(store->ctl()->meta.mgr),
+ PurgePeriodLogsCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, epoch_t realm_epoch, epoch_t *last_trim)
+ : RGWCoroutine(store->ctx()), dpp(dpp), store(store), metadata(store->ctl()->meta.mgr),
realm_epoch(realm_epoch), last_trim_epoch(last_trim) {
svc.zone = store->svc()->zone;
svc.mdlog = store->svc()->mdlog;
}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int PurgePeriodLogsCR::operate()
+int PurgePeriodLogsCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
// read our current oldest log period
- yield call(svc.mdlog->read_oldest_log_period_cr(&cursor, &objv));
+ yield call(svc.mdlog->read_oldest_log_period_cr(dpp, &cursor, &objv));
if (retcode < 0) {
return set_cr_error(retcode);
}
ceph_assert(cursor);
- ldout(cct, 20) << "oldest log realm_epoch=" << cursor.get_epoch()
+ ldpp_dout(dpp, 20) << "oldest log realm_epoch=" << cursor.get_epoch()
<< " period=" << cursor.get_period().get_id() << dendl;
// trim -up to- the given realm_epoch
while (cursor.get_epoch() < realm_epoch) {
- ldout(cct, 4) << "purging log shards for realm_epoch=" << cursor.get_epoch()
+ ldpp_dout(dpp, 4) << "purging log shards for realm_epoch=" << cursor.get_epoch()
<< " period=" << cursor.get_period().get_id() << dendl;
yield {
const auto mdlog = svc.mdlog->get_log(cursor.get_period().get_id());
call(new PurgeLogShardsCR(store, mdlog, pool, num_shards));
}
if (retcode < 0) {
- ldout(cct, 1) << "failed to remove log shards: "
+ ldpp_dout(dpp, 1) << "failed to remove log shards: "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
- ldout(cct, 10) << "removed log shards for realm_epoch=" << cursor.get_epoch()
+ ldpp_dout(dpp, 10) << "removed log shards for realm_epoch=" << cursor.get_epoch()
<< " period=" << cursor.get_period().get_id() << dendl;
// update our mdlog history
- yield call(svc.mdlog->trim_log_period_cr(cursor, &objv));
+ yield call(svc.mdlog->trim_log_period_cr(dpp, cursor, &objv));
if (retcode == -ENOENT) {
// must have raced to update mdlog history. return success and allow the
// winner to continue purging
- ldout(cct, 10) << "already removed log shards for realm_epoch=" << cursor.get_epoch()
+ ldpp_dout(dpp, 10) << "already removed log shards for realm_epoch=" << cursor.get_epoch()
<< " period=" << cursor.get_period().get_id() << dendl;
return set_cr_done();
} else if (retcode < 0) {
- ldout(cct, 1) << "failed to remove log shards for realm_epoch="
+ ldpp_dout(dpp, 1) << "failed to remove log shards for realm_epoch="
<< cursor.get_epoch() << " period=" << cursor.get_period().get_id()
<< " with: " << cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
if (stable <= last_trim) {
// already trimmed
- ldout(cct, 20) << "skipping log shard " << shard_id
+ ldpp_dout(env.dpp, 20) << "skipping log shard " << shard_id
<< " at marker=" << stable
<< " last_trim=" << last_trim
<< " realm_epoch=" << sync_status.sync_info.realm_epoch << dendl;
mdlog->get_shard_oid(shard_id, oid);
- ldout(cct, 10) << "trimming log shard " << shard_id
+ ldpp_dout(env.dpp, 10) << "trimming log shard " << shard_id
<< " at marker=" << stable
<< " last_trim=" << last_trim
<< " realm_epoch=" << sync_status.sync_info.realm_epoch << dendl;
- spawn(new RGWSyncLogTrimCR(env.store, oid, stable, &last_trim), false);
+ spawn(new RGWSyncLogTrimCR(env.dpp, env.store, oid, stable, &last_trim), false);
shard_id++;
return true;
}
: RGWCoroutine(env.store->ctx()), env(env)
{}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int MetaMasterTrimCR::operate()
+int MetaMasterTrimCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
// TODO: detect this and fail before we spawn the trim thread?
if (env.connections.empty()) {
- ldout(cct, 4) << "no peers, exiting" << dendl;
+ ldpp_dout(dpp, 4) << "no peers, exiting" << dendl;
return set_cr_done();
}
- ldout(cct, 10) << "fetching sync status for zone " << env.zone << dendl;
+ ldpp_dout(dpp, 10) << "fetching sync status for zone " << env.zone << dendl;
// query mdlog sync status from peers
yield call(new MetaMasterStatusCollectCR(env));
// must get a successful reply from all peers to consider trimming
if (ret < 0) {
- ldout(cct, 4) << "failed to fetch sync status from all peers" << dendl;
+ ldpp_dout(dpp, 4) << "failed to fetch sync status from all peers" << dendl;
return set_cr_error(ret);
}
ret = take_min_status(env.store->ctx(), env.peer_status.begin(),
env.peer_status.end(), &min_status);
if (ret < 0) {
- ldout(cct, 4) << "failed to calculate min sync status from peers" << dendl;
+ ldpp_dout(dpp, 4) << "failed to calculate min sync status from peers" << dendl;
return set_cr_error(ret);
}
yield {
auto store = env.store;
auto epoch = min_status.sync_info.realm_epoch;
- ldout(cct, 4) << "realm epoch min=" << epoch
+ ldpp_dout(dpp, 4) << "realm epoch min=" << epoch
<< " current=" << env.current.get_epoch()<< dendl;
if (epoch > env.last_trim_epoch + 1) {
// delete any prior mdlog periods
- spawn(new PurgePeriodLogsCR(store, epoch, &env.last_trim_epoch), true);
+ spawn(new PurgePeriodLogsCR(dpp, store, epoch, &env.last_trim_epoch), true);
} else {
- ldout(cct, 10) << "mdlogs already purged up to realm_epoch "
+ ldpp_dout(dpp, 10) << "mdlogs already purged up to realm_epoch "
<< env.last_trim_epoch << dendl;
}
period_id(period_id), shard_id(shard_id), last_trim(last_trim)
{}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int MetaPeerTrimShardCR::operate()
+int MetaPeerTrimShardCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
// query master's first mdlog entry for this shard
yield call(create_list_remote_mdlog_shard_cr(&env, period_id, shard_id,
"", 1, &result));
if (retcode < 0) {
- ldpp_dout(env.dpp, 5) << "failed to read first entry from master's mdlog shard "
+ ldpp_dout(dpp, 5) << "failed to read first entry from master's mdlog shard "
<< shard_id << " for period " << period_id
<< ": " << cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
// this empty reply. query the mdlog shard info to read its max timestamp,
// then retry the listing to make sure it's still empty before trimming to
// that
- ldpp_dout(env.dpp, 10) << "empty master mdlog shard " << shard_id
+ ldpp_dout(dpp, 10) << "empty master mdlog shard " << shard_id
<< ", reading last timestamp from shard info" << dendl;
// read the mdlog shard info for the last timestamp
yield call(create_read_remote_mdlog_shard_info_cr(&env, period_id, shard_id, &info));
if (retcode < 0) {
- ldpp_dout(env.dpp, 5) << "failed to read info from master's mdlog shard "
+ ldpp_dout(dpp, 5) << "failed to read info from master's mdlog shard "
<< shard_id << " for period " << period_id
<< ": " << cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
if (ceph::real_clock::is_zero(info.last_update)) {
return set_cr_done(); // nothing to trim
}
- ldpp_dout(env.dpp, 10) << "got mdlog shard info with last update="
+ ldpp_dout(dpp, 10) << "got mdlog shard info with last update="
<< info.last_update << dendl;
// re-read the master's first mdlog entry to make sure it hasn't changed
yield call(create_list_remote_mdlog_shard_cr(&env, period_id, shard_id,
"", 1, &result));
if (retcode < 0) {
- ldpp_dout(env.dpp, 5) << "failed to read first entry from master's mdlog shard "
+ ldpp_dout(dpp, 5) << "failed to read first entry from master's mdlog shard "
<< shard_id << " for period " << period_id
<< ": " << cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
if (stable <= *last_trim) {
- ldpp_dout(env.dpp, 10) << "skipping log shard " << shard_id
+ ldpp_dout(dpp, 10) << "skipping log shard " << shard_id
<< " at timestamp=" << stable
<< " last_trim=" << *last_trim << dendl;
return set_cr_done();
}
- ldpp_dout(env.dpp, 10) << "trimming log shard " << shard_id
+ ldpp_dout(dpp, 10) << "trimming log shard " << shard_id
<< " at timestamp=" << stable
<< " last_trim=" << *last_trim << dendl;
yield {
std::string oid;
mdlog->get_shard_oid(shard_id, oid);
- call(new RGWRadosTimelogTrimCR(env.store, oid, real_time{}, stable, "", ""));
+ call(new RGWRadosTimelogTrimCR(dpp, env.store, oid, real_time{}, stable, "", ""));
}
if (retcode < 0 && retcode != -ENODATA) {
- ldpp_dout(env.dpp, 1) << "failed to trim mdlog shard " << shard_id
+ ldpp_dout(dpp, 1) << "failed to trim mdlog shard " << shard_id
<< ": " << cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
public:
explicit MetaPeerTrimCR(PeerTrimEnv& env) : RGWCoroutine(env.store->ctx()), env(env) {}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int MetaPeerTrimCR::operate()
+int MetaPeerTrimCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
- ldout(cct, 10) << "fetching master mdlog info" << dendl;
+ ldpp_dout(dpp, 10) << "fetching master mdlog info" << dendl;
yield {
// query mdlog_info from master for oldest_log_period
rgw_http_param_pair params[] = {
"/admin/log/", params, &mdlog_info));
}
if (retcode < 0) {
- ldout(cct, 4) << "failed to read mdlog info from master" << dendl;
+ ldpp_dout(dpp, 4) << "failed to read mdlog info from master" << dendl;
return set_cr_error(retcode);
}
// use master's shard count instead
if (mdlog_info.realm_epoch > env.last_trim_epoch + 1) {
// delete any prior mdlog periods
- yield call(new PurgePeriodLogsCR(env.store, mdlog_info.realm_epoch,
+ yield call(new PurgePeriodLogsCR(dpp, env.store, mdlog_info.realm_epoch,
&env.last_trim_epoch));
} else {
- ldout(cct, 10) << "mdlogs already purged through realm_epoch "
+ ldpp_dout(dpp, 10) << "mdlogs already purged through realm_epoch "
<< env.last_trim_epoch << dendl;
}
cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct))
{}
- int operate() override;
+ int operate(const DoutPrefixProvider *dpp) override;
};
-int MetaTrimPollCR::operate()
+int MetaTrimPollCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
for (;;) {
while (is_truncated) {
if (bucket) {
- ret = bucket->read_usage(start_epoch, end_epoch, max_entries, &is_truncated,
+ ret = bucket->read_usage(dpp, start_epoch, end_epoch, max_entries, &is_truncated,
usage_iter, usage);
} else if (user) {
- ret = user->read_usage(start_epoch, end_epoch, max_entries, &is_truncated,
+ ret = user->read_usage(dpp, start_epoch, end_epoch, max_entries, &is_truncated,
usage_iter, usage);
} else {
- ret = store->read_all_usage(start_epoch, end_epoch, max_entries, &is_truncated,
+ ret = store->read_all_usage(dpp, start_epoch, end_epoch, max_entries, &is_truncated,
usage_iter, usage);
}
uint64_t start_epoch, uint64_t end_epoch)
{
if (bucket) {
- return bucket->trim_usage(start_epoch, end_epoch);
+ return bucket->trim_usage(dpp, start_epoch, end_epoch);
} else if (user) {
- return user->trim_usage(start_epoch, end_epoch);
+ return user->trim_usage(dpp, start_epoch, end_epoch);
} else {
- return store->trim_all_usage(start_epoch, end_epoch);
+ return store->trim_all_usage(dpp, start_epoch, end_epoch);
}
}
-int RGWUsage::clear(rgw::sal::Store* store)
+int RGWUsage::clear(const DoutPrefixProvider *dpp, rgw::sal::Store* store)
{
- return store->clear_usage();
+ return store->clear_usage(dpp);
}
#include <map>
#include "common/Formatter.h"
+#include "common/dout.h"
#include "rgw_formats.h"
#include "rgw_user.h"
rgw::sal::User* user , rgw::sal::Bucket* bucket,
uint64_t start_epoch, uint64_t end_epoch);
- static int clear(rgw::sal::Store* store);
+ static int clear(const DoutPrefixProvider *dpp, rgw::sal::Store* store);
};
ldpp_dout(dpp, 0) << "ERROR: could not read bucket info: bucket=" << bucket << " ret=" << ret << dendl;
continue;
}
- ret = bucket->sync_user_stats(y);
+ ret = bucket->sync_user_stats(dpp, y);
if (ret < 0) {
ldout(cct, 0) << "ERROR: could not sync bucket stats: ret=" << ret << dendl;
return ret;
}
} while (user_buckets.is_truncated());
- ret = user->complete_flush_stats(y);
+ ret = user->complete_flush_stats(dpp, y);
if (ret < 0) {
cerr << "ERROR: failed to complete syncing user stats: ret=" << ret << std::endl;
return ret;
return 0;
}
-int RGWUser::list(RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher)
+int RGWUser::list(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher)
{
Formatter *formatter = flusher.get_formatter();
void *handle = nullptr;
op_state.max_entries = 1000;
}
- int ret = store->meta_list_keys_init(metadata_key, op_state.marker, &handle);
+ int ret = store->meta_list_keys_init(dpp, metadata_key, op_state.marker, &handle);
if (ret < 0) {
return ret;
}
return 0;
}
-int RGWUserAdminOp_User::list(rgw::sal::Store* store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_User::list(const DoutPrefixProvider *dpp, rgw::sal::Store* store, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
RGWUser user;
if (ret < 0)
return ret;
- ret = user.list(op_state, flusher);
+ ret = user.list(dpp, op_state, flusher);
if (ret < 0)
return ret;
RGWStorageStats stats;
RGWStorageStats *arg_stats = NULL;
if (op_state.fetch_stats) {
- int ret = ruser->read_stats(y, &stats);
+ int ret = ruser->read_stats(dpp, y, &stats);
if (ret < 0 && ret != -ENOENT) {
return ret;
}
});
}
-int RGWUserCtl::add_bucket(const rgw_user& user,
+int RGWUserCtl::add_bucket(const DoutPrefixProvider *dpp,
+ const rgw_user& user,
const rgw_bucket& bucket,
ceph::real_time creation_time,
optional_yield y)
{
return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) {
- return svc.user->add_bucket(op->ctx(), user, bucket, creation_time, y);
+ return svc.user->add_bucket(dpp, op->ctx(), user, bucket, creation_time, y);
});
}
-int RGWUserCtl::remove_bucket(const rgw_user& user,
+int RGWUserCtl::remove_bucket(const DoutPrefixProvider *dpp,
+ const rgw_user& user,
const rgw_bucket& bucket,
optional_yield y)
{
return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) {
- return svc.user->remove_bucket(op->ctx(), user, bucket, y);
+ return svc.user->remove_bucket(dpp, op->ctx(), user, bucket, y);
});
}
}
return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) {
- int ret = svc.user->list_buckets(op->ctx(), user, marker, end_marker,
+ int ret = svc.user->list_buckets(dpp, op->ctx(), user, marker, end_marker,
max, buckets, is_truncated, y);
if (ret < 0) {
return ret;
});
}
-int RGWUserCtl::flush_bucket_stats(const rgw_user& user,
+int RGWUserCtl::flush_bucket_stats(const DoutPrefixProvider *dpp,
+ const rgw_user& user,
const RGWBucketEnt& ent,
optional_yield y)
{
return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) {
- return svc.user->flush_bucket_stats(op->ctx(), user, ent, y);
+ return svc.user->flush_bucket_stats(dpp, op->ctx(), user, ent, y);
});
}
-int RGWUserCtl::complete_flush_stats(const rgw_user& user, optional_yield y)
+int RGWUserCtl::complete_flush_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y)
{
return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) {
- return svc.user->complete_flush_stats(op->ctx(), user, y);
+ return svc.user->complete_flush_stats(dpp, op->ctx(), user, y);
});
}
-int RGWUserCtl::reset_stats(const rgw_user& user, optional_yield y)
+int RGWUserCtl::reset_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y)
{
return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) {
- return svc.user->reset_bucket_stats(op->ctx(), user, y);
+ return svc.user->reset_bucket_stats(dpp, op->ctx(), user, y);
});
}
-int RGWUserCtl::read_stats(const rgw_user& user, RGWStorageStats *stats,
+int RGWUserCtl::read_stats(const DoutPrefixProvider *dpp,
+ const rgw_user& user, RGWStorageStats *stats,
optional_yield y,
ceph::real_time *last_stats_sync,
ceph::real_time *last_stats_update)
{
return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) {
- return svc.user->read_stats(op->ctx(), user, stats,
+ return svc.user->read_stats(dpp, op->ctx(), user, stats,
last_stats_sync, last_stats_update, y);
});
}
-int RGWUserCtl::read_stats_async(const rgw_user& user, RGWGetUserStats_CB *cb)
+int RGWUserCtl::read_stats_async(const DoutPrefixProvider *dpp, const rgw_user& user, RGWGetUserStats_CB *cb)
{
return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) {
- return svc.user->read_stats_async(op->ctx(), user, cb);
+ return svc.user->read_stats_async(dpp, op->ctx(), user, cb);
});
}
int info (RGWUserInfo& fetched_info, std::string *err_msg = NULL);
/* list the existing users */
- int list(RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
+ int list(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
friend class RGWAccessKeyPool;
friend class RGWSubUserPool;
class RGWUserAdminOp_User
{
public:
- static int list(rgw::sal::Store* store,
+ static int list(const DoutPrefixProvider *dpp, rgw::sal::Store* store,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
static int info(const DoutPrefixProvider *dpp,
const RGWUserInfo& info, optional_yield y,
const RemoveParams& params = {});
- int add_bucket(const rgw_user& user,
+ int add_bucket(const DoutPrefixProvider *dpp,
+ const rgw_user& user,
const rgw_bucket& bucket,
ceph::real_time creation_time,
optional_yield y);
- int remove_bucket(const rgw_user& user,
+ int remove_bucket(const DoutPrefixProvider *dpp,
+ const rgw_user& user,
const rgw_bucket& bucket, optional_yield y);
int list_buckets(const DoutPrefixProvider *dpp,
const rgw_user& user,
optional_yield y,
uint64_t default_max = 1000);
- int flush_bucket_stats(const rgw_user& user,
+ int flush_bucket_stats(const DoutPrefixProvider *dpp,
+ const rgw_user& user,
const RGWBucketEnt& ent,
optional_yield y);
- int complete_flush_stats(const rgw_user& user, optional_yield y);
- int reset_stats(const rgw_user& user, optional_yield y);
- int read_stats(const rgw_user& user, RGWStorageStats *stats,
+ int complete_flush_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y);
+ int reset_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y);
+ int read_stats(const DoutPrefixProvider *dpp,
+ const rgw_user& user, RGWStorageStats *stats,
optional_yield y,
ceph::real_time *last_stats_sync = nullptr, /* last time a full stats sync completed */
ceph::real_time *last_stats_update = nullptr); /* last time a stats update was done */
- int read_stats_async(const rgw_user& user, RGWGetUserStats_CB *ctx);
+ int read_stats_async(const DoutPrefixProvider *dpp, const rgw_user& user, RGWGetUserStats_CB *ctx);
};
class RGWUserMetaHandlerAllocator {
#include "common/ceph_mutex.h"
#include "include/common_fwd.h"
+#define dout_subsys ceph_subsys_rgw
+
class RGWRados;
class RGWRadosThread {
- class Worker : public Thread {
+ class Worker : public Thread, public DoutPrefixProvider {
CephContext *cct;
RGWRadosThread *processor;
ceph::mutex lock = ceph::make_mutex("RGWRadosThread::Worker");
std::lock_guard l{lock};
cond.notify_all();
}
+
+ CephContext *get_cct() const { return cct; }
+ unsigned get_subsys() const { return dout_subsys; }
+ std::ostream& gen_prefix(std::ostream& out) const { return out << "rgw rados thread: "; }
+
};
Worker *worker;
stop();
}
- virtual int init() { return 0; }
- virtual int process() = 0;
+ virtual int init(const DoutPrefixProvider *dpp) { return 0; }
+ virtual int process(const DoutPrefixProvider *dpp) = 0;
bool going_down() { return down_flag; }
RGWZoneParams zone_params(default_zone_name);
- int r = zone_params.init(cct, sysobj_svc, y, false);
+ int r = zone_params.init(dpp, cct, sysobj_svc, y, false);
if (r < 0) {
ldpp_dout(dpp, 0) << "create_default: error initializing zone params: " << cpp_strerror(-r) << dendl;
return r;
} else if (r == -EEXIST) {
ldpp_dout(dpp, 10) << "zone_params::create_default() returned -EEXIST, we raced with another default zone_params creation" << dendl;
zone_params.clear_id();
- r = zone_params.init(cct, sysobj_svc, y);
+ r = zone_params.init(dpp, cct, sysobj_svc, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "create_default: error in init existing zone params: " << cpp_strerror(-r) << dendl;
return r;
if (r == -EEXIST) {
ldpp_dout(dpp, 10) << "create_default() returned -EEXIST, we raced with another zonegroup creation" << dendl;
id.clear();
- r = init(cct, sysobj_svc, y);
+ r = init(dpp, cct, sysobj_svc, y);
if (r < 0) {
return r;
}
name = id;
}
- post_process_params(y);
+ post_process_params(dpp, y);
return 0;
}
return (id == other_zonegroup);
}
-int RGWZoneGroup::add_zone(const RGWZoneParams& zone_params, bool *is_master, bool *read_only,
+int RGWZoneGroup::add_zone(const DoutPrefixProvider *dpp,
+ const RGWZoneParams& zone_params, bool *is_master, bool *read_only,
const list<string>& endpoints, const string *ptier_type,
bool *psync_from_all, list<string>& sync_from, list<string>& sync_from_rm,
string *predirect_zone, std::optional<int> bucket_index_max_shards,
if (!zones.count(zone_id)) {
for (const auto& zone : zones) {
if (zone.second.name == zone_name) {
- ldout(cct, 0) << "ERROR: found existing zone name " << zone_name
+ ldpp_dout(dpp, 0) << "ERROR: found existing zone name " << zone_name
<< " (" << zone.first << ") in zonegroup " << get_name() << dendl;
return -EEXIST;
}
if (is_master) {
if (*is_master) {
if (!master_zone.empty() && master_zone != zone_id) {
- ldout(cct, 0) << "NOTICE: overriding master zone: " << master_zone << dendl;
+ ldpp_dout(dpp, 0) << "NOTICE: overriding master zone: " << master_zone << dendl;
}
master_zone = zone_id;
} else if (master_zone == zone_id) {
if (ptier_type) {
zone.tier_type = *ptier_type;
if (!sync_mgr->get_module(*ptier_type, nullptr)) {
- ldout(cct, 0) << "ERROR: could not found sync module: " << *ptier_type
+ ldpp_dout(dpp, 0) << "ERROR: could not found sync module: " << *ptier_type
<< ", valid sync modules: "
<< sync_mgr->get_registered_module_names()
<< dendl;
zone.sync_from.erase(rm);
}
- post_process_params(y);
+ post_process_params(dpp, y);
- return update(y);
+ return update(dpp, y);
}
-int RGWZoneGroup::rename_zone(const RGWZoneParams& zone_params,
+int RGWZoneGroup::rename_zone(const DoutPrefixProvider *dpp,
+ const RGWZoneParams& zone_params,
optional_yield y)
{
RGWZone& zone = zones[zone_params.get_id()];
zone.name = zone_params.get_name();
- return update(y);
+ return update(dpp, y);
}
-void RGWZoneGroup::post_process_params(optional_yield y)
+void RGWZoneGroup::post_process_params(const DoutPrefixProvider *dpp, optional_yield y)
{
bool log_data = zones.size() > 1;
zone.log_data = log_data;
RGWZoneParams zone_params(zone.id, zone.name);
- int ret = zone_params.init(cct, sysobj_svc, y);
+ int ret = zone_params.init(dpp, cct, sysobj_svc, y);
if (ret < 0) {
- ldout(cct, 0) << "WARNING: could not read zone params for zone id=" << zone.id << " name=" << zone.name << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: could not read zone params for zone id=" << zone.id << " name=" << zone.name << dendl;
continue;
}
}
}
-int RGWZoneGroup::remove_zone(const std::string& zone_id, optional_yield y)
+int RGWZoneGroup::remove_zone(const DoutPrefixProvider *dpp, const std::string& zone_id, optional_yield y)
{
auto iter = zones.find(zone_id);
if (iter == zones.end()) {
- ldout(cct, 0) << "zone id " << zone_id << " is not a part of zonegroup "
+ ldpp_dout(dpp, 0) << "zone id " << zone_id << " is not a part of zonegroup "
<< name << dendl;
return -ENOENT;
}
zones.erase(iter);
- post_process_params(y);
+ post_process_params(dpp, y);
- return update(y);
+ return update(dpp, y);
}
-int RGWZoneGroup::read_default_id(string& default_id, optional_yield y,
+int RGWZoneGroup::read_default_id(const DoutPrefixProvider *dpp, string& default_id, optional_yield y,
bool old_format)
{
if (realm_id.empty()) {
/* try using default realm */
RGWRealm realm;
- int ret = realm.init(cct, sysobj_svc, y);
+ int ret = realm.init(dpp, cct, sysobj_svc, y);
// no default realm exist
if (ret < 0) {
- return read_id(default_zonegroup_name, default_id, y);
+ return read_id(dpp, default_zonegroup_name, default_id, y);
}
realm_id = realm.get_id();
}
- return RGWSystemMetaObj::read_default_id(default_id, y, old_format);
+ return RGWSystemMetaObj::read_default_id(dpp, default_id, y, old_format);
}
-int RGWZoneGroup::set_as_default(optional_yield y, bool exclusive)
+int RGWZoneGroup::set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive)
{
if (realm_id.empty()) {
/* try using default realm */
RGWRealm realm;
- int ret = realm.init(cct, sysobj_svc, y);
+ int ret = realm.init(dpp, cct, sysobj_svc, y);
if (ret < 0) {
- ldout(cct, 10) << "could not read realm id: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 10) << "could not read realm id: " << cpp_strerror(-ret) << dendl;
return -EINVAL;
}
realm_id = realm.get_id();
}
- return RGWSystemMetaObj::set_as_default(y, exclusive);
+ return RGWSystemMetaObj::set_as_default(dpp, y, exclusive);
}
void RGWSystemMetaObj::reinit_instance(CephContext *_cct, RGWSI_SysObj *_sysobj_svc)
zone_svc = _sysobj_svc->get_zone_svc();
}
-int RGWSystemMetaObj::init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc,
+int RGWSystemMetaObj::init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc,
optional_yield y,
bool setup_obj, bool old_format)
{
name = get_predefined_name(cct);
}
if (name.empty()) {
- r = use_default(y, old_format);
+ r = use_default(dpp, y, old_format);
if (r < 0) {
return r;
}
} else if (!old_format) {
- r = read_id(name, id, y);
+ r = read_id(dpp, name, id, y);
if (r < 0) {
if (r != -ENOENT) {
- ldout(cct, 0) << "error in read_id for object name: " << name << " : " << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "error in read_id for object name: " << name << " : " << cpp_strerror(-r) << dendl;
}
return r;
}
}
}
- return read_info(id, y, old_format);
+ return read_info(dpp, id, y, old_format);
}
-int RGWSystemMetaObj::read_default(RGWDefaultSystemMetaObjInfo& default_info,
+int RGWSystemMetaObj::read_default(const DoutPrefixProvider *dpp,
+ RGWDefaultSystemMetaObjInfo& default_info,
const string& oid, optional_yield y)
{
using ceph::decode;
auto obj_ctx = sysobj_svc->init_obj_ctx();
auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid));
- int ret = sysobj.rop().read(&bl, y);
+ int ret = sysobj.rop().read(dpp, &bl, y);
if (ret < 0)
return ret;
auto iter = bl.cbegin();
decode(default_info, iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "error decoding data from " << pool << ":" << oid << dendl;
+ ldpp_dout(dpp, 0) << "error decoding data from " << pool << ":" << oid << dendl;
return -EIO;
}
return 0;
}
-int RGWSystemMetaObj::read_default_id(string& default_id, optional_yield y,
+int RGWSystemMetaObj::read_default_id(const DoutPrefixProvider *dpp, string& default_id, optional_yield y,
bool old_format)
{
RGWDefaultSystemMetaObjInfo default_info;
- int ret = read_default(default_info, get_default_oid(old_format), y);
+ int ret = read_default(dpp, default_info, get_default_oid(old_format), y);
if (ret < 0) {
return ret;
}
return 0;
}
-int RGWSystemMetaObj::use_default(optional_yield y, bool old_format)
+int RGWSystemMetaObj::use_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format)
{
- return read_default_id(id, y, old_format);
+ return read_default_id(dpp, id, y, old_format);
}
-int RGWSystemMetaObj::set_as_default(optional_yield y, bool exclusive)
+int RGWSystemMetaObj::set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive)
{
using ceph::encode;
string oid = get_default_oid();
auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid));
int ret = sysobj.wop()
.set_exclusive(exclusive)
- .write(bl, y);
+ .write(dpp, bl, y);
if (ret < 0)
return ret;
return 0;
}
-int RGWSystemMetaObj::read_id(const string& obj_name, string& object_id,
+int RGWSystemMetaObj::read_id(const DoutPrefixProvider *dpp, const string& obj_name, string& object_id,
optional_yield y)
{
using ceph::decode;
auto obj_ctx = sysobj_svc->init_obj_ctx();
auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid));
- int ret = sysobj.rop().read(&bl, y);
+ int ret = sysobj.rop().read(dpp, &bl, y);
if (ret < 0) {
return ret;
}
auto iter = bl.cbegin();
decode(nameToId, iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: failed to decode obj from " << pool << ":" << oid << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode obj from " << pool << ":" << oid << dendl;
return -EIO;
}
object_id = nameToId.obj_id;
return 0;
}
-int RGWSystemMetaObj::delete_obj(optional_yield y, bool old_format)
+int RGWSystemMetaObj::delete_obj(const DoutPrefixProvider *dpp, optional_yield y, bool old_format)
{
rgw_pool pool(get_pool(cct));
/* check to see if obj is the default */
RGWDefaultSystemMetaObjInfo default_info;
- int ret = read_default(default_info, get_default_oid(old_format), y);
+ int ret = read_default(dpp, default_info, get_default_oid(old_format), y);
if (ret < 0 && ret != -ENOENT)
return ret;
if (default_info.default_id == id || (old_format && default_info.default_id == name)) {
string oid = get_default_oid(old_format);
rgw_raw_obj default_named_obj(pool, oid);
auto sysobj = sysobj_svc->get_obj(obj_ctx, default_named_obj);
- ret = sysobj.wop().remove(y);
+ ret = sysobj.wop().remove(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << "Error delete default obj name " << name << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "Error delete default obj name " << name << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
}
string oid = get_names_oid_prefix() + name;
rgw_raw_obj object_name(pool, oid);
auto sysobj = sysobj_svc->get_obj(obj_ctx, object_name);
- ret = sysobj.wop().remove(y);
+ ret = sysobj.wop().remove(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << "Error delete obj name " << name << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "Error delete obj name " << name << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
}
rgw_raw_obj object_id(pool, oid);
auto sysobj = sysobj_svc->get_obj(obj_ctx, object_id);
- ret = sysobj.wop().remove(y);
+ ret = sysobj.wop().remove(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << "Error delete object id " << id << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "Error delete object id " << id << ": " << cpp_strerror(-ret) << dendl;
}
return ret;
}
-int RGWSystemMetaObj::store_name(bool exclusive, optional_yield y)
+int RGWSystemMetaObj::store_name(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
{
rgw_pool pool(get_pool(cct));
string oid = get_names_oid_prefix() + name;
auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid));
return sysobj.wop()
.set_exclusive(exclusive)
- .write(bl, y);
+ .write(dpp, bl, y);
}
-int RGWSystemMetaObj::rename(const string& new_name, optional_yield y)
+int RGWSystemMetaObj::rename(const DoutPrefixProvider *dpp, const string& new_name, optional_yield y)
{
string new_id;
- int ret = read_id(new_name, new_id, y);
+ int ret = read_id(dpp, new_name, new_id, y);
if (!ret) {
return -EEXIST;
}
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << "Error read_id " << new_name << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "Error read_id " << new_name << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
string old_name = name;
name = new_name;
- ret = update(y);
+ ret = update(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << "Error storing new obj info " << new_name << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "Error storing new obj info " << new_name << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
- ret = store_name(true, y);
+ ret = store_name(dpp, true, y);
if (ret < 0) {
- ldout(cct, 0) << "Error storing new name " << new_name << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "Error storing new name " << new_name << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
/* delete old name */
rgw_raw_obj old_name_obj(pool, oid);
auto obj_ctx = sysobj_svc->init_obj_ctx();
auto sysobj = sysobj_svc->get_obj(obj_ctx, old_name_obj);
- ret = sysobj.wop().remove(y);
+ ret = sysobj.wop().remove(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << "Error delete old obj name " << old_name << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "Error delete old obj name " << old_name << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
return ret;
}
-int RGWSystemMetaObj::read_info(const string& obj_id, optional_yield y,
+int RGWSystemMetaObj::read_info(const DoutPrefixProvider *dpp, const string& obj_id, optional_yield y,
bool old_format)
{
rgw_pool pool(get_pool(cct));
auto obj_ctx = sysobj_svc->init_obj_ctx();
auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, oid});
- int ret = sysobj.rop().read(&bl, y);
+ int ret = sysobj.rop().read(dpp, &bl, y);
if (ret < 0) {
- ldout(cct, 0) << "failed reading obj info from " << pool << ":" << oid << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "failed reading obj info from " << pool << ":" << oid << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
using ceph::decode;
auto iter = bl.cbegin();
decode(*this, iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: failed to decode obj from " << pool << ":" << oid << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode obj from " << pool << ":" << oid << dendl;
return -EIO;
}
return 0;
}
-int RGWSystemMetaObj::read(optional_yield y)
+int RGWSystemMetaObj::read(const DoutPrefixProvider *dpp, optional_yield y)
{
- int ret = read_id(name, id, y);
+ int ret = read_id(dpp, name, id, y);
if (ret < 0) {
return ret;
}
- return read_info(id, y);
+ return read_info(dpp, id, y);
}
int RGWSystemMetaObj::create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive)
int ret;
/* check to see the name is not used */
- ret = read_id(name, id, y);
+ ret = read_id(dpp, name, id, y);
if (exclusive && ret == 0) {
ldpp_dout(dpp, 10) << "ERROR: name " << name << " already in use for obj id " << id << dendl;
return -EEXIST;
id = uuid_str;
}
- ret = store_info(exclusive, y);
+ ret = store_info(dpp, exclusive, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: storing info for " << id << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
- return store_name(exclusive, y);
+ return store_name(dpp, exclusive, y);
}
-int RGWSystemMetaObj::store_info(bool exclusive, optional_yield y)
+int RGWSystemMetaObj::store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
{
rgw_pool pool(get_pool(cct));
auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, oid});
return sysobj.wop()
.set_exclusive(exclusive)
- .write(bl, y);
+ .write(dpp, bl, y);
}
-int RGWSystemMetaObj::write(bool exclusive, optional_yield y)
+int RGWSystemMetaObj::write(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
{
- int ret = store_info(exclusive, y);
+ int ret = store_info(dpp, exclusive, y);
if (ret < 0) {
- ldout(cct, 20) << __func__ << "(): store_info() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): store_info() returned ret=" << ret << dendl;
return ret;
}
- ret = store_name(exclusive, y);
+ ret = store_name(dpp, exclusive, y);
if (ret < 0) {
- ldout(cct, 20) << __func__ << "(): store_name() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): store_name() returned ret=" << ret << dendl;
return ret;
}
return 0;
return ret;
}
// create the control object for watch/notify
- ret = create_control(exclusive, y);
+ ret = create_control(dpp, exclusive, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR creating control for new realm " << name << ": " << cpp_strerror(-ret) << dendl;
return ret;
RGWPeriod period;
if (current_period.empty()) {
/* create new period for the realm */
- ret = period.init(cct, sysobj_svc, id, y, name, false);
+ ret = period.init(dpp, cct, sysobj_svc, id, y, name, false);
if (ret < 0 ) {
return ret;
}
}
} else {
period = RGWPeriod(current_period, 0);
- int ret = period.init(cct, sysobj_svc, id, y, name);
+ int ret = period.init(dpp, cct, sysobj_svc, id, y, name);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: failed to init period " << current_period << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to init period " << current_period << dendl;
return ret;
}
}
- ret = set_current_period(period, y);
+ ret = set_current_period(dpp, period, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: failed set current period " << current_period << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed set current period " << current_period << dendl;
return ret;
}
// try to set as default. may race with another create, so pass exclusive=true
// so we don't override an existing default
- ret = set_as_default(y, true);
+ ret = set_as_default(dpp, y, true);
if (ret < 0 && ret != -EEXIST) {
- ldout(cct, 0) << "WARNING: failed to set realm as default realm, ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: failed to set realm as default realm, ret=" << ret << dendl;
}
return 0;
}
-int RGWRealm::delete_obj(optional_yield y)
+int RGWRealm::delete_obj(const DoutPrefixProvider *dpp, optional_yield y)
{
- int ret = RGWSystemMetaObj::delete_obj(y);
+ int ret = RGWSystemMetaObj::delete_obj(dpp, y);
if (ret < 0) {
return ret;
}
- return delete_control(y);
+ return delete_control(dpp, y);
}
-int RGWRealm::create_control(bool exclusive, optional_yield y)
+int RGWRealm::create_control(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
{
auto pool = rgw_pool{get_pool(cct)};
auto oid = get_control_oid();
auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, oid});
return sysobj.wop()
.set_exclusive(exclusive)
- .write(bl, y);
+ .write(dpp, bl, y);
}
-int RGWRealm::delete_control(optional_yield y)
+int RGWRealm::delete_control(const DoutPrefixProvider *dpp, optional_yield y)
{
auto pool = rgw_pool{get_pool(cct)};
auto obj = rgw_raw_obj{pool, get_control_oid()};
auto obj_ctx = sysobj_svc->init_obj_ctx();
auto sysobj = sysobj_svc->get_obj(obj_ctx, obj);
- return sysobj.wop().remove(y);
+ return sysobj.wop().remove(dpp, y);
}
rgw_pool RGWRealm::get_pool(CephContext *cct) const
return realm_info_oid_prefix;
}
-int RGWRealm::set_current_period(RGWPeriod& period, optional_yield y)
+int RGWRealm::set_current_period(const DoutPrefixProvider *dpp, RGWPeriod& period, optional_yield y)
{
// update realm epoch to match the period's
if (epoch > period.get_realm_epoch()) {
- ldout(cct, 0) << "ERROR: set_current_period with old realm epoch "
+ ldpp_dout(dpp, 0) << "ERROR: set_current_period with old realm epoch "
<< period.get_realm_epoch() << ", current epoch=" << epoch << dendl;
return -EINVAL;
}
if (epoch == period.get_realm_epoch() && current_period != period.get_id()) {
- ldout(cct, 0) << "ERROR: set_current_period with same realm epoch "
+ ldpp_dout(dpp, 0) << "ERROR: set_current_period with same realm epoch "
<< period.get_realm_epoch() << ", but different period id "
<< period.get_id() << " != " << current_period << dendl;
return -EINVAL;
epoch = period.get_realm_epoch();
current_period = period.get_id();
- int ret = update(y);
+ int ret = update(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: period update: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: period update: " << cpp_strerror(-ret) << dendl;
return ret;
}
- ret = period.reflect(y);
+ ret = period.reflect(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: period.reflect(): " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: period.reflect(): " << cpp_strerror(-ret) << dendl;
return ret;
}
return get_info_oid_prefix() + id + ".control";
}
-int RGWRealm::notify_zone(bufferlist& bl, optional_yield y)
+int RGWRealm::notify_zone(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y)
{
rgw_pool pool{get_pool(cct)};
auto obj_ctx = sysobj_svc->init_obj_ctx();
auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, get_control_oid()});
- int ret = sysobj.wn().notify(bl, 0, nullptr, y);
+ int ret = sysobj.wn().notify(dpp, bl, 0, nullptr, y);
if (ret < 0) {
return ret;
}
return 0;
}
-int RGWRealm::notify_new_period(const RGWPeriod& period, optional_yield y)
+int RGWRealm::notify_new_period(const DoutPrefixProvider *dpp, const RGWPeriod& period, optional_yield y)
{
bufferlist bl;
using ceph::encode;
// reload the gateway with the new period
encode(RGWRealmNotify::Reload, bl);
- return notify_zone(bl, y);
+ return notify_zone(dpp, bl, y);
}
std::string RGWPeriodConfig::get_oid(const std::string& realm_id)
return {pool_name};
}
-int RGWPeriodConfig::read(RGWSI_SysObj *sysobj_svc, const std::string& realm_id,
+int RGWPeriodConfig::read(const DoutPrefixProvider *dpp, RGWSI_SysObj *sysobj_svc, const std::string& realm_id,
optional_yield y)
{
const auto& pool = get_pool(sysobj_svc->ctx());
auto obj_ctx = sysobj_svc->init_obj_ctx();
auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, oid});
- int ret = sysobj.rop().read(&bl, y);
+ int ret = sysobj.rop().read(dpp, &bl, y);
if (ret < 0) {
return ret;
}
return 0;
}
-int RGWPeriodConfig::write(RGWSI_SysObj *sysobj_svc,
+int RGWPeriodConfig::write(const DoutPrefixProvider *dpp,
+ RGWSI_SysObj *sysobj_svc,
const std::string& realm_id, optional_yield y)
{
const auto& pool = get_pool(sysobj_svc->ctx());
auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, oid});
return sysobj.wop()
.set_exclusive(false)
- .write(bl, y);
+ .write(dpp, bl, y);
}
-int RGWPeriod::init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc,
+int RGWPeriod::init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc,
const string& period_realm_id, optional_yield y,
const string& period_realm_name, bool setup_obj)
{
if (!setup_obj)
return 0;
- return init(_cct, _sysobj_svc, y, setup_obj);
+ return init(dpp, _cct, _sysobj_svc, y, setup_obj);
}
-int RGWPeriod::init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc,
+int RGWPeriod::init(const DoutPrefixProvider *dpp,
+ CephContext *_cct, RGWSI_SysObj *_sysobj_svc,
optional_yield y, bool setup_obj)
{
cct = _cct;
if (id.empty()) {
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(cct, sysobj_svc, y);
+ int ret = realm.init(dpp, cct, sysobj_svc, y);
if (ret < 0) {
- ldout(cct, 0) << "RGWPeriod::init failed to init realm " << realm_name << " id " << realm_id << " : " <<
+ ldpp_dout(dpp, 0) << "RGWPeriod::init failed to init realm " << realm_name << " id " << realm_id << " : " <<
cpp_strerror(-ret) << dendl;
return ret;
}
}
if (!epoch) {
- int ret = use_latest_epoch(y);
+ int ret = use_latest_epoch(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << "failed to use_latest_epoch period id " << id << " realm " << realm_name << " id " << realm_id
+ ldpp_dout(dpp, 0) << "failed to use_latest_epoch period id " << id << " realm " << realm_name << " id " << realm_id
<< " : " << cpp_strerror(-ret) << dendl;
return ret;
}
}
- return read_info(y);
+ return read_info(dpp, y);
}
return oss.str();
}
-int RGWPeriod::read_latest_epoch(RGWPeriodLatestEpochInfo& info,
+int RGWPeriod::read_latest_epoch(const DoutPrefixProvider *dpp,
+ RGWPeriodLatestEpochInfo& info,
optional_yield y,
RGWObjVersionTracker *objv)
{
bufferlist bl;
auto obj_ctx = sysobj_svc->init_obj_ctx();
auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, oid});
- int ret = sysobj.rop().read(&bl, y);
+ int ret = sysobj.rop().read(dpp, &bl, y);
if (ret < 0) {
- ldout(cct, 1) << "error read_lastest_epoch " << pool << ":" << oid << dendl;
+ ldpp_dout(dpp, 1) << "error read_latest_epoch " << pool << ":" << oid << dendl;
return ret;
}
try {
using ceph::decode;
decode(info, iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "error decoding data from " << pool << ":" << oid << dendl;
+ ldpp_dout(dpp, 0) << "error decoding data from " << pool << ":" << oid << dendl;
return -EIO;
}
return 0;
}
-int RGWPeriod::get_latest_epoch(epoch_t& latest_epoch, optional_yield y)
+int RGWPeriod::get_latest_epoch(const DoutPrefixProvider *dpp, epoch_t& latest_epoch, optional_yield y)
{
RGWPeriodLatestEpochInfo info;
- int ret = read_latest_epoch(info, y);
+ int ret = read_latest_epoch(dpp, info, y);
if (ret < 0) {
return ret;
}
return 0;
}
-int RGWPeriod::use_latest_epoch(optional_yield y)
+int RGWPeriod::use_latest_epoch(const DoutPrefixProvider *dpp, optional_yield y)
{
RGWPeriodLatestEpochInfo info;
- int ret = read_latest_epoch(info, y);
+ int ret = read_latest_epoch(dpp, info, y);
if (ret < 0) {
return ret;
}
return 0;
}
-int RGWPeriod::set_latest_epoch(optional_yield y,
+int RGWPeriod::set_latest_epoch(const DoutPrefixProvider *dpp,
+ optional_yield y,
epoch_t epoch, bool exclusive,
RGWObjVersionTracker *objv)
{
auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid));
return sysobj.wop()
.set_exclusive(exclusive)
- .write(bl, y);
+ .write(dpp, bl, y);
}
-int RGWPeriod::update_latest_epoch(epoch_t epoch, optional_yield y)
+int RGWPeriod::update_latest_epoch(const DoutPrefixProvider *dpp, epoch_t epoch, optional_yield y)
{
static constexpr int MAX_RETRIES = 20;
bool exclusive = false;
// read existing epoch
- int r = read_latest_epoch(info, y, &objv);
+ int r = read_latest_epoch(dpp, info, y, &objv);
if (r == -ENOENT) {
// use an exclusive create to set the epoch atomically
exclusive = true;
- ldout(cct, 20) << "creating initial latest_epoch=" << epoch
+ ldpp_dout(dpp, 20) << "creating initial latest_epoch=" << epoch
<< " for period=" << id << dendl;
} else if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to read latest_epoch" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to read latest_epoch" << dendl;
return r;
} else if (epoch <= info.epoch) {
r = -EEXIST; // fail with EEXIST if epoch is not newer
- ldout(cct, 10) << "found existing latest_epoch " << info.epoch
+ ldpp_dout(dpp, 10) << "found existing latest_epoch " << info.epoch
<< " >= given epoch " << epoch << ", returning r=" << r << dendl;
return r;
} else {
- ldout(cct, 20) << "updating latest_epoch from " << info.epoch
+ ldpp_dout(dpp, 20) << "updating latest_epoch from " << info.epoch
<< " -> " << epoch << " on period=" << id << dendl;
}
- r = set_latest_epoch(y, epoch, exclusive, &objv);
+ r = set_latest_epoch(dpp, y, epoch, exclusive, &objv);
if (r == -EEXIST) {
continue; // exclusive create raced with another update, retry
} else if (r == -ECANCELED) {
continue; // write raced with a conflicting version, retry
}
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to write latest_epoch" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to write latest_epoch" << dendl;
return r;
}
return 0; // return success
return -ECANCELED; // fail after max retries
}
-int RGWPeriod::delete_obj(optional_yield y)
+int RGWPeriod::delete_obj(const DoutPrefixProvider *dpp, optional_yield y)
{
rgw_pool pool(get_pool(cct));
rgw_raw_obj oid{pool, p.get_period_oid()};
auto obj_ctx = sysobj_svc->init_obj_ctx();
auto sysobj = sysobj_svc->get_obj(obj_ctx, oid);
- int ret = sysobj.wop().remove(y);
+ int ret = sysobj.wop().remove(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << "WARNING: failed to delete period object " << oid
+ ldpp_dout(dpp, 0) << "WARNING: failed to delete period object " << oid
<< ": " << cpp_strerror(-ret) << dendl;
}
}
rgw_raw_obj oid{pool, get_period_oid_prefix() + get_latest_epoch_oid()};
auto obj_ctx = sysobj_svc->init_obj_ctx();
auto sysobj = sysobj_svc->get_obj(obj_ctx, oid);
- int ret = sysobj.wop().remove(y);
+ int ret = sysobj.wop().remove(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << "WARNING: failed to delete period object " << oid
+ ldpp_dout(dpp, 0) << "WARNING: failed to delete period object " << oid
<< ": " << cpp_strerror(-ret) << dendl;
}
return ret;
}
-int RGWPeriod::read_info(optional_yield y)
+int RGWPeriod::read_info(const DoutPrefixProvider *dpp, optional_yield y)
{
rgw_pool pool(get_pool(cct));
auto obj_ctx = sysobj_svc->init_obj_ctx();
auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, get_period_oid()});
- int ret = sysobj.rop().read(&bl, y);
+ int ret = sysobj.rop().read(dpp, &bl, y);
if (ret < 0) {
- ldout(cct, 0) << "failed reading obj info from " << pool << ":" << get_period_oid() << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "failed reading obj info from " << pool << ":" << get_period_oid() << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
auto iter = bl.cbegin();
decode(*this, iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: failed to decode obj from " << pool << ":" << get_period_oid() << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode obj from " << pool << ":" << get_period_oid() << dendl;
return -EIO;
}
period_map.id = id;
- ret = store_info(exclusive, y);
+ ret = store_info(dpp, exclusive, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: storing info for " << id << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
- ret = set_latest_epoch(y, epoch);
+ ret = set_latest_epoch(dpp, y, epoch);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: setting latest epoch " << id << ": " << cpp_strerror(-ret) << dendl;
}
return ret;
}
-int RGWPeriod::store_info(bool exclusive, optional_yield y)
+int RGWPeriod::store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
{
rgw_pool pool(get_pool(cct));
auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid));
return sysobj.wop()
.set_exclusive(exclusive)
- .write(bl, y);
+ .write(dpp, bl, y);
}
rgw_pool RGWPeriod::get_pool(CephContext *cct) const
return rgw_pool(cct->_conf->rgw_period_root_pool);
}
-int RGWPeriod::add_zonegroup(const RGWZoneGroup& zonegroup, optional_yield y)
+int RGWPeriod::add_zonegroup(const DoutPrefixProvider *dpp, const RGWZoneGroup& zonegroup, optional_yield y)
{
if (zonegroup.realm_id != realm_id) {
return 0;
}
int ret = period_map.update(zonegroup, cct);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: updating period map: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: updating period map: " << cpp_strerror(-ret) << dendl;
return ret;
}
- return store_info(false, y);
+ return store_info(dpp, false, y);
}
-int RGWPeriod::update(optional_yield y)
+int RGWPeriod::update(const DoutPrefixProvider *dpp, optional_yield y)
{
auto zone_svc = sysobj_svc->get_zone_svc();
- ldout(cct, 20) << __func__ << " realm " << realm_id << " period " << get_id() << dendl;
+ ldpp_dout(dpp, 20) << __func__ << " realm " << realm_id << " period " << get_id() << dendl;
list<string> zonegroups;
- int ret = zone_svc->list_zonegroups(zonegroups);
+ int ret = zone_svc->list_zonegroups(dpp, zonegroups);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: failed to list zonegroups: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to list zonegroups: " << cpp_strerror(-ret) << dendl;
return ret;
}
for (auto& iter : zonegroups) {
RGWZoneGroup zg(string(), iter);
- ret = zg.init(cct, sysobj_svc, y);
+ ret = zg.init(dpp, cct, sysobj_svc, y);
if (ret < 0) {
- ldout(cct, 0) << "WARNING: zg.init() failed: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: zg.init() failed: " << cpp_strerror(-ret) << dendl;
continue;
}
if (zg.realm_id != realm_id) {
- ldout(cct, 20) << "skipping zonegroup " << zg.get_name() << " zone realm id " << zg.realm_id << ", not on our realm " << realm_id << dendl;
+ ldpp_dout(dpp, 20) << "skipping zonegroup " << zg.get_name() << " zone realm id " << zg.realm_id << ", not on our realm " << realm_id << dendl;
continue;
}
if (zg.master_zone.empty()) {
- ldout(cct, 0) << "ERROR: zonegroup " << zg.get_name() << " should have a master zone " << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: zonegroup " << zg.get_name() << " should have a master zone " << dendl;
return -EINVAL;
}
if (zg.zones.find(zg.master_zone) == zg.zones.end()) {
- ldout(cct,0) << "ERROR: zonegroup " << zg.get_name()
+ ldpp_dout(dpp, 0) << "ERROR: zonegroup " << zg.get_name()
<< " has a non existent master zone "<< dendl;
return -EINVAL;
}
}
}
- ret = period_config.read(sysobj_svc, realm_id, y);
+ ret = period_config.read(dpp, sysobj_svc, realm_id, y);
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << "ERROR: failed to read period config: "
+ ldpp_dout(dpp, 0) << "ERROR: failed to read period config: "
<< cpp_strerror(ret) << dendl;
return ret;
}
return 0;
}
-int RGWPeriod::reflect(optional_yield y)
+int RGWPeriod::reflect(const DoutPrefixProvider *dpp, optional_yield y)
{
for (auto& iter : period_map.zonegroups) {
RGWZoneGroup& zg = iter.second;
zg.reinit_instance(cct, sysobj_svc);
- int r = zg.write(false, y);
+ int r = zg.write(dpp, false, y);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to store zonegroup info for zonegroup=" << iter.first << ": " << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to store zonegroup info for zonegroup=" << iter.first << ": " << cpp_strerror(-r) << dendl;
return r;
}
if (zg.is_master_zonegroup()) {
// set master as default if no default exists
- r = zg.set_as_default(y, true);
+ r = zg.set_as_default(dpp, y, true);
if (r == 0) {
- ldout(cct, 1) << "Set the period's master zonegroup " << zg.get_id()
+ ldpp_dout(dpp, 1) << "Set the period's master zonegroup " << zg.get_id()
<< " as the default" << dendl;
}
}
}
- int r = period_config.write(sysobj_svc, realm_id, y);
+ int r = period_config.write(dpp, sysobj_svc, realm_id, y);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to store period config: "
+ ldpp_dout(dpp, 0) << "ERROR: failed to store period config: "
<< cpp_strerror(-r) << dendl;
return r;
}
realm_epoch++;
}
-static int read_sync_status(rgw::sal::RadosStore* store, rgw_meta_sync_status *sync_status)
+static int read_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, rgw_meta_sync_status *sync_status)
{
// initialize a sync status manager to read the status
RGWMetaSyncStatusManager mgr(store, store->svc()->rados->get_async_processor());
- int r = mgr.init();
+ int r = mgr.init(dpp);
if (r < 0) {
return r;
}
- r = mgr.read_sync_status(sync_status);
+ r = mgr.read_sync_status(dpp, sync_status);
mgr.stop();
return r;
}
-int RGWPeriod::update_sync_status(rgw::sal::Store* store, /* for now */
+int RGWPeriod::update_sync_status(const DoutPrefixProvider *dpp,
+ rgw::sal::Store* store, /* for now */
const RGWPeriod ¤t_period,
std::ostream& error_stream,
bool force_if_stale)
{
rgw_meta_sync_status status;
- int r = read_sync_status(static_cast<rgw::sal::RadosStore*>(store), &status);
+ int r = read_sync_status(dpp, static_cast<rgw::sal::RadosStore*>(store), &status);
if (r < 0) {
- ldout(cct, 0) << "period failed to read sync status: "
+ ldpp_dout(dpp, 0) << "period failed to read sync status: "
<< cpp_strerror(-r) << dendl;
return r;
}
// did the master zone change?
if (master_zone != current_period.get_master_zone()) {
// store the current metadata sync status in the period
- int r = update_sync_status(store, current_period, error_stream, force_if_stale);
+ int r = update_sync_status(dpp, store, current_period, error_stream, force_if_stale);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to update metadata sync status: "
<< cpp_strerror(-r) << dendl;
return r;
}
// set as current period
- r = realm.set_current_period(*this, y);
+ r = realm.set_current_period(dpp, *this, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to update realm's current period: "
<< cpp_strerror(-r) << dendl;
}
ldpp_dout(dpp, 4) << "Promoted to master zone and committed new period "
<< id << dendl;
- realm.notify_new_period(*this, y);
+ realm.notify_new_period(dpp, *this, y);
return 0;
}
// period must be based on current epoch
set_predecessor(current_period.get_predecessor());
realm_epoch = current_period.get_realm_epoch();
// write the period to rados
- int r = store_info(false, y);
+ int r = store_info(dpp, false, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to store period: " << cpp_strerror(-r) << dendl;
return r;
}
// set as latest epoch
- r = update_latest_epoch(epoch, y);
+ r = update_latest_epoch(dpp, epoch, y);
if (r == -EEXIST) {
// already have this epoch (or a more recent one)
return 0;
ldpp_dout(dpp, 0) << "failed to set latest epoch: " << cpp_strerror(-r) << dendl;
return r;
}
- r = reflect(y);
+ r = reflect(dpp, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to update local objects: " << cpp_strerror(-r) << dendl;
return r;
}
ldpp_dout(dpp, 4) << "Committed new epoch " << epoch
<< " for period " << id << dendl;
- realm.notify_new_period(*this, y);
+ realm.notify_new_period(dpp, *this, y);
return 0;
}
namespace {
-int get_zones_pool_set(CephContext* cct,
+int get_zones_pool_set(const DoutPrefixProvider *dpp,
+ CephContext* cct,
RGWSI_SysObj* sysobj_svc,
const list<string>& zones,
const string& my_zone_id,
{
for(auto const& iter : zones) {
RGWZoneParams zone(iter);
- int r = zone.init(cct, sysobj_svc, y);
+ int r = zone.init(dpp, cct, sysobj_svc, y);
if (r < 0) {
- ldout(cct, 0) << "Error: init zone " << iter << ":" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "Error: init zone " << iter << ":" << cpp_strerror(-r) << dendl;
return r;
}
if (zone.get_id() != my_zone_id) {
}
}
-int RGWZoneParams::fix_pool_names(optional_yield y)
+int RGWZoneParams::fix_pool_names(const DoutPrefixProvider *dpp, optional_yield y)
{
list<string> zones;
- int r = zone_svc->list_zones(zones);
+ int r = zone_svc->list_zones(dpp, zones);
if (r < 0) {
- ldout(cct, 10) << "WARNING: store->list_zones() returned r=" << r << dendl;
+ ldpp_dout(dpp, 10) << "WARNING: zone_svc->list_zones() returned r=" << r << dendl;
}
set<rgw_pool> pools;
- r = get_zones_pool_set(cct, sysobj_svc, zones, id, pools, y);
+ r = get_zones_pool_set(dpp, cct, sysobj_svc, zones, id, pools, y);
if (r < 0) {
- ldout(cct, 0) << "Error: get_zones_pool_names" << r << dendl;
+ ldpp_dout(dpp, 0) << "Error: get_zones_pool_set r=" << r << dendl;
return r;
}
placement_pools["default-placement"] = default_placement;
}
- r = fix_pool_names(y);
+ r = fix_pool_names(dpp, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: fix_pool_names returned r=" << r << dendl;
return r;
// try to set as default. may race with another create, so pass exclusive=true
// so we don't override an existing default
- r = set_as_default(y, true);
+ r = set_as_default(dpp, y, true);
if (r < 0 && r != -EEXIST) {
ldpp_dout(dpp, 10) << "WARNING: failed to set zone as default, r=" << r << dendl;
}
return cct->_conf->rgw_zone;
}
-int RGWZoneParams::init(CephContext *cct, RGWSI_SysObj *sysobj_svc,
+int RGWZoneParams::init(const DoutPrefixProvider *dpp,
+ CephContext *cct, RGWSI_SysObj *sysobj_svc,
optional_yield y, bool setup_obj, bool old_format)
{
if (name.empty()) {
name = cct->_conf->rgw_zone;
}
- return RGWSystemMetaObj::init(cct, sysobj_svc, y, setup_obj, old_format);
+ return RGWSystemMetaObj::init(dpp, cct, sysobj_svc, y, setup_obj, old_format);
}
-int RGWZoneParams::read_default_id(string& default_id, optional_yield y,
+int RGWZoneParams::read_default_id(const DoutPrefixProvider *dpp, string& default_id, optional_yield y,
bool old_format)
{
if (realm_id.empty()) {
/* try using default realm */
RGWRealm realm;
- int ret = realm.init(cct, sysobj_svc, y);
+ int ret = realm.init(dpp, cct, sysobj_svc, y);
//no default realm exist
if (ret < 0) {
- return read_id(default_zone_name, default_id, y);
+ return read_id(dpp, default_zone_name, default_id, y);
}
realm_id = realm.get_id();
}
- return RGWSystemMetaObj::read_default_id(default_id, y, old_format);
+ return RGWSystemMetaObj::read_default_id(dpp, default_id, y, old_format);
}
-int RGWZoneParams::set_as_default(optional_yield y, bool exclusive)
+int RGWZoneParams::set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive)
{
if (realm_id.empty()) {
/* try using default realm */
RGWRealm realm;
- int ret = realm.init(cct, sysobj_svc, y);
+ int ret = realm.init(dpp, cct, sysobj_svc, y);
if (ret < 0) {
- ldout(cct, 10) << "could not read realm id: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 10) << "could not read realm id: " << cpp_strerror(-ret) << dendl;
return -EINVAL;
}
realm_id = realm.get_id();
}
- return RGWSystemMetaObj::set_as_default(y, exclusive);
+ return RGWSystemMetaObj::set_as_default(dpp, y, exclusive);
}
const string& RGWZoneParams::get_compression_type(const rgw_placement_rule& placement_rule) const
return i->second;
}
-int RGWZoneGroupMap::read(CephContext *cct, RGWSI_SysObj *sysobj_svc, optional_yield y)
+int RGWZoneGroupMap::read(const DoutPrefixProvider *dpp, CephContext *cct, RGWSI_SysObj *sysobj_svc, optional_yield y)
{
RGWPeriod period;
- int ret = period.init(cct, sysobj_svc, y);
+ int ret = period.init(dpp, cct, sysobj_svc, y);
if (ret < 0) {
cerr << "failed to read current period info: " << cpp_strerror(ret);
return ret;
RGWSI_SysObj *sysobj_svc{nullptr};
RGWSI_Zone *zone_svc{nullptr};
- int store_name(bool exclusive, optional_yield y);
- int store_info(bool exclusive, optional_yield y);
- int read_info(const std::string& obj_id, optional_yield y, bool old_format = false);
- int read_id(const std::string& obj_name, std::string& obj_id, optional_yield y);
- int read_default(RGWDefaultSystemMetaObjInfo& default_info,
+ int store_name(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y);
+ int store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y);
+ int read_info(const DoutPrefixProvider *dpp, const std::string& obj_id, optional_yield y, bool old_format = false);
+ int read_id(const DoutPrefixProvider *dpp, const std::string& obj_name, std::string& obj_id, optional_yield y);
+ int read_default(const DoutPrefixProvider *dpp,
+ RGWDefaultSystemMetaObjInfo& default_info,
const std::string& oid,
optional_yield y);
/* read and use default id */
- int use_default(optional_yield y, bool old_format = false);
+ int use_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false);
public:
RGWSystemMetaObj() {}
}
void reinit_instance(CephContext *_cct, RGWSI_SysObj *_sysobj_svc);
- int init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc,
+ int init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc,
optional_yield y,
bool setup_obj = true, bool old_format = false);
- virtual int read_default_id(std::string& default_id, optional_yield y,
+ virtual int read_default_id(const DoutPrefixProvider *dpp, std::string& default_id, optional_yield y,
bool old_format = false);
- virtual int set_as_default(optional_yield y, bool exclusive = false);
+ virtual int set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = false);
int delete_default();
virtual int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true);
- int delete_obj(optional_yield y, bool old_format = false);
- int rename(const std::string& new_name, optional_yield y);
- int update(optional_yield y) { return store_info(false, y);}
- int update_name(optional_yield y) { return store_name(false, y);}
- int read(optional_yield y);
- int write(bool exclusive, optional_yield y);
+ int delete_obj(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false);
+ int rename(const DoutPrefixProvider *dpp, const std::string& new_name, optional_yield y);
+ int update(const DoutPrefixProvider *dpp, optional_yield y) { return store_info(dpp, false, y);}
+ int update_name(const DoutPrefixProvider *dpp, optional_yield y) { return store_name(dpp, false, y);}
+ int read(const DoutPrefixProvider *dpp, optional_yield y);
+ int write(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y);
virtual rgw_pool get_pool(CephContext *cct) const = 0;
virtual const std::string get_default_oid(bool old_format = false) const = 0;
const std::string& get_info_oid_prefix(bool old_format = false) const override;
const std::string& get_predefined_name(CephContext *cct) const override;
- int init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y,
+ int init(const DoutPrefixProvider *dpp,
+ CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y,
bool setup_obj = true, bool old_format = false);
using RGWSystemMetaObj::init;
- int read_default_id(std::string& default_id, optional_yield y, bool old_format = false) override;
- int set_as_default(optional_yield y, bool exclusive = false) override;
+ int read_default_id(const DoutPrefixProvider *dpp, std::string& default_id, optional_yield y, bool old_format = false) override;
+ int set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = false) override;
int create_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false);
int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true) override;
- int fix_pool_names(optional_yield y);
+ int fix_pool_names(const DoutPrefixProvider *dpp, optional_yield y);
const string& get_compression_type(const rgw_placement_rule& placement_rule) const;
realm_id(_realm_id) {}
bool is_master_zonegroup() const { return is_master;}
- void update_master(bool _is_master, optional_yield y) {
+ void update_master(const DoutPrefixProvider *dpp, bool _is_master, optional_yield y) {
is_master = _is_master;
- post_process_params(y);
+ post_process_params(dpp, y);
}
- void post_process_params(optional_yield y);
+ void post_process_params(const DoutPrefixProvider *dpp, optional_yield y);
void encode(bufferlist& bl) const override {
ENCODE_START(5, 1, bl);
DECODE_FINISH(bl);
}
- int read_default_id(std::string& default_id, optional_yield y, bool old_format = false) override;
- int set_as_default(optional_yield y, bool exclusive = false) override;
+ int read_default_id(const DoutPrefixProvider *dpp, std::string& default_id, optional_yield y, bool old_format = false) override;
+ int set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = false) override;
int create_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false);
int equals(const std::string& other_zonegroup) const;
- int add_zone(const RGWZoneParams& zone_params, bool *is_master, bool *read_only,
+ int add_zone(const DoutPrefixProvider *dpp,
+ const RGWZoneParams& zone_params, bool *is_master, bool *read_only,
const list<std::string>& endpoints, const std::string *ptier_type,
bool *psync_from_all, list<std::string>& sync_from,
list<std::string>& sync_from_rm, std::string *predirect_zone,
std::optional<int> bucket_index_max_shards, RGWSyncModulesManager *sync_mgr,
optional_yield y);
- int remove_zone(const std::string& zone_id, optional_yield y);
- int rename_zone(const RGWZoneParams& zone_params, optional_yield y);
+ int remove_zone(const DoutPrefixProvider *dpp, const std::string& zone_id, optional_yield y);
+ int rename_zone(const DoutPrefixProvider *dpp, const RGWZoneParams& zone_params, optional_yield y);
rgw_pool get_pool(CephContext *cct) const override;
const std::string get_default_oid(bool old_region_format = false) const override;
const std::string& get_info_oid_prefix(bool old_region_format = false) const override;
// the period config must be stored in a local object outside of the period,
// so that it can be used in a default configuration where no realm/period
// exists
- int read(RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y);
- int write(RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y);
+ int read(const DoutPrefixProvider *dpp, RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y);
+ int write(const DoutPrefixProvider *dpp, RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y);
static std::string get_oid(const std::string& realm_id);
static rgw_pool get_pool(CephContext *cct);
RGWQuotaInfo user_quota;
/* construct the map */
- int read(CephContext *cct, RGWSI_SysObj *sysobj_svc, optional_yield y);
+ int read(const DoutPrefixProvider *dpp, CephContext *cct, RGWSI_SysObj *sysobj_svc, optional_yield y);
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& bl);
std::string current_period;
epoch_t epoch{0}; //< realm epoch, incremented for each new period
- int create_control(bool exclusive, optional_yield y);
- int delete_control(optional_yield y);
+ int create_control(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y);
+ int delete_control(const DoutPrefixProvider *dpp, optional_yield y);
public:
RGWRealm() {}
RGWRealm(const std::string& _id, const std::string& _name = "") : RGWSystemMetaObj(_id, _name) {}
}
int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true) override;
- int delete_obj(optional_yield y);
+ int delete_obj(const DoutPrefixProvider *dpp, optional_yield y);
rgw_pool get_pool(CephContext *cct) const override;
const std::string get_default_oid(bool old_format = false) const override;
const std::string& get_names_oid_prefix() const override;
const std::string& get_current_period() const {
return current_period;
}
- int set_current_period(RGWPeriod& period, optional_yield y);
+ int set_current_period(const DoutPrefixProvider *dpp, RGWPeriod& period, optional_yield y);
void clear_current_period_and_epoch() {
current_period.clear();
epoch = 0;
std::string get_control_oid() const;
/// send a notify on the realm control object
- int notify_zone(bufferlist& bl, optional_yield y);
+ int notify_zone(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y);
/// notify the zone of a new period
- int notify_new_period(const RGWPeriod& period, optional_yield y);
+ int notify_new_period(const DoutPrefixProvider *dpp, const RGWPeriod& period, optional_yield y);
};
WRITE_CLASS_ENCODER(RGWRealm)
CephContext *cct{nullptr};
RGWSI_SysObj *sysobj_svc{nullptr};
- int read_info(optional_yield y);
- int read_latest_epoch(RGWPeriodLatestEpochInfo& epoch_info,
+ int read_info(const DoutPrefixProvider *dpp, optional_yield y);
+ int read_latest_epoch(const DoutPrefixProvider *dpp,
+ RGWPeriodLatestEpochInfo& epoch_info,
optional_yield y,
RGWObjVersionTracker *objv = nullptr);
- int use_latest_epoch(optional_yield y);
+ int use_latest_epoch(const DoutPrefixProvider *dpp, optional_yield y);
int use_current_period();
const std::string get_period_oid() const;
const std::string get_period_oid_prefix() const;
// gather the metadata sync status for each shard; only for use on master zone
- int update_sync_status(rgw::sal::Store* store,
+ int update_sync_status(const DoutPrefixProvider *dpp,
+ rgw::sal::Store* store,
const RGWPeriod ¤t_period,
std::ostream& error_stream, bool force_if_stale);
realm_id = _realm_id;
}
- int reflect(optional_yield y);
+ int reflect(const DoutPrefixProvider *dpp, optional_yield y);
int get_zonegroup(RGWZoneGroup& zonegroup,
const std::string& zonegroup_id) const;
return false;
}
- int get_latest_epoch(epoch_t& epoch, optional_yield y);
- int set_latest_epoch(optional_yield y,
+ int get_latest_epoch(const DoutPrefixProvider *dpp, epoch_t& epoch, optional_yield y);
+ int set_latest_epoch(const DoutPrefixProvider *dpp, optional_yield y,
epoch_t epoch, bool exclusive = false,
RGWObjVersionTracker *objv = nullptr);
// update latest_epoch if the given epoch is higher, else return -EEXIST
- int update_latest_epoch(epoch_t epoch, optional_yield y);
+ int update_latest_epoch(const DoutPrefixProvider *dpp, epoch_t epoch, optional_yield y);
- int init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, const std::string &period_realm_id, optional_yield y,
+ int init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc, const std::string &period_realm_id, optional_yield y,
const std::string &period_realm_name = "", bool setup_obj = true);
- int init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, bool setup_obj = true);
+ int init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, bool setup_obj = true);
int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true);
- int delete_obj(optional_yield y);
- int store_info(bool exclusive, optional_yield y);
- int add_zonegroup(const RGWZoneGroup& zonegroup, optional_yield y);
+ int delete_obj(const DoutPrefixProvider *dpp, optional_yield y);
+ int store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y);
+ int add_zonegroup(const DoutPrefixProvider *dpp, const RGWZoneGroup& zonegroup, optional_yield y);
void fork();
- int update(optional_yield y);
+ int update(const DoutPrefixProvider *dpp, optional_yield y);
// commit a staging period; only for use on master zone
int commit(const DoutPrefixProvider *dpp,
RGWSI_BucketIndex(CephContext *cct) : RGWServiceInstance(cct) {}
virtual ~RGWSI_BucketIndex() {}
- virtual int init_index(RGWBucketInfo& bucket_info) = 0;
- virtual int clean_index(RGWBucketInfo& bucket_info) = 0;
+ virtual int init_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info) = 0;
+ virtual int clean_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info) = 0;
- virtual int read_stats(const RGWBucketInfo& bucket_info,
+ virtual int read_stats(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
RGWBucketEnt *stats,
optional_yield y) = 0;
svc.datalog_rados = datalog_rados_svc;
}
-int RGWSI_BucketIndex_RADOS::open_pool(const rgw_pool& pool,
+int RGWSI_BucketIndex_RADOS::open_pool(const DoutPrefixProvider *dpp,
+ const rgw_pool& pool,
RGWSI_RADOS::Pool *index_pool,
bool mostly_omap)
{
*index_pool = svc.rados->pool(pool);
- return index_pool->open(RGWSI_RADOS::OpenParams()
+ return index_pool->open(dpp, RGWSI_RADOS::OpenParams()
.set_mostly_omap(mostly_omap));
}
-int RGWSI_BucketIndex_RADOS::open_bucket_index_pool(const RGWBucketInfo& bucket_info,
+int RGWSI_BucketIndex_RADOS::open_bucket_index_pool(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
RGWSI_RADOS::Pool *index_pool)
{
const rgw_pool& explicit_pool = bucket_info.bucket.explicit_placement.index_pool;
if (!explicit_pool.empty()) {
- return open_pool(explicit_pool, index_pool, false);
+ return open_pool(dpp, explicit_pool, index_pool, false);
}
auto& zonegroup = svc.zone->get_zonegroup();
}
auto iter = zone_params.placement_pools.find(rule->name);
if (iter == zone_params.placement_pools.end()) {
- ldout(cct, 0) << "could not find placement rule " << *rule << " within zonegroup " << dendl;
+ ldpp_dout(dpp, 0) << "could not find placement rule " << *rule << " within zonegroup " << dendl;
return -EINVAL;
}
- int r = open_pool(iter->second.index_pool, index_pool, true);
+ int r = open_pool(dpp, iter->second.index_pool, index_pool, true);
if (r < 0)
return r;
return 0;
}
-int RGWSI_BucketIndex_RADOS::open_bucket_index_base(const RGWBucketInfo& bucket_info,
+int RGWSI_BucketIndex_RADOS::open_bucket_index_base(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
RGWSI_RADOS::Pool *index_pool,
string *bucket_oid_base)
{
const rgw_bucket& bucket = bucket_info.bucket;
- int r = open_bucket_index_pool(bucket_info, index_pool);
+ int r = open_bucket_index_pool(dpp, bucket_info, index_pool);
if (r < 0)
return r;
if (bucket.bucket_id.empty()) {
- ldout(cct, 0) << "ERROR: empty bucket_id for bucket operation" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: empty bucket_id for bucket operation" << dendl;
return -EIO;
}
}
-int RGWSI_BucketIndex_RADOS::open_bucket_index(const RGWBucketInfo& bucket_info,
+int RGWSI_BucketIndex_RADOS::open_bucket_index(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
RGWSI_RADOS::Pool *index_pool,
string *bucket_oid)
{
const rgw_bucket& bucket = bucket_info.bucket;
- int r = open_bucket_index_pool(bucket_info, index_pool);
+ int r = open_bucket_index_pool(dpp, bucket_info, index_pool);
if (r < 0) {
- ldout(cct, 20) << __func__ << ": open_bucket_index_pool() returned "
+ ldpp_dout(dpp, 20) << __func__ << ": open_bucket_index_pool() returned "
<< r << dendl;
return r;
}
if (bucket.bucket_id.empty()) {
- ldout(cct, 0) << "ERROR: empty bucket id for bucket operation" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: empty bucket id for bucket operation" << dendl;
return -EIO;
}
}
}
-int RGWSI_BucketIndex_RADOS::open_bucket_index(const RGWBucketInfo& bucket_info,
+int RGWSI_BucketIndex_RADOS::open_bucket_index(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
std::optional<int> _shard_id,
RGWSI_RADOS::Pool *index_pool,
map<int, string> *bucket_objs,
{
int shard_id = _shard_id.value_or(-1);
string bucket_oid_base;
- int ret = open_bucket_index_base(bucket_info, index_pool, &bucket_oid_base);
+ int ret = open_bucket_index_base(dpp, bucket_info, index_pool, &bucket_oid_base);
if (ret < 0) {
- ldout(cct, 20) << __func__ << ": open_bucket_index_pool() returned "
+ ldpp_dout(dpp, 20) << __func__ << ": open_bucket_index_pool() returned "
<< ret << dendl;
return ret;
}
return r;
}
-int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const RGWBucketInfo& bucket_info,
+int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
const string& obj_key,
RGWSI_RADOS::Obj *bucket_obj,
int *shard_id)
RGWSI_RADOS::Pool pool;
- int ret = open_bucket_index_base(bucket_info, &pool, &bucket_oid_base);
+ int ret = open_bucket_index_base(dpp, bucket_info, &pool, &bucket_oid_base);
if (ret < 0) {
- ldout(cct, 20) << __func__ << ": open_bucket_index_pool() returned "
+ ldpp_dout(dpp, 20) << __func__ << ": open_bucket_index_pool() returned "
<< ret << dendl;
return ret;
}
ret = get_bucket_index_object(bucket_oid_base, obj_key, bucket_info.layout.current_index.layout.normal.num_shards,
bucket_info.layout.current_index.layout.normal.hash_type, &oid, shard_id);
if (ret < 0) {
- ldout(cct, 10) << "get_bucket_index_object() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 10) << "get_bucket_index_object() returned ret=" << ret << dendl;
return ret;
}
return 0;
}
-int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const RGWBucketInfo& bucket_info,
+int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
int shard_id,
const rgw::bucket_index_layout_generation& idx_layout,
RGWSI_RADOS::Obj *bucket_obj)
{
RGWSI_RADOS::Pool index_pool;
string bucket_oid_base;
- int ret = open_bucket_index_base(bucket_info, &index_pool, &bucket_oid_base);
+ int ret = open_bucket_index_base(dpp, bucket_info, &index_pool, &bucket_oid_base);
if (ret < 0) {
- ldout(cct, 20) << __func__ << ": open_bucket_index_pool() returned "
+ ldpp_dout(dpp, 20) << __func__ << ": open_bucket_index_pool() returned "
<< ret << dendl;
return ret;
}
return 0;
}
-int RGWSI_BucketIndex_RADOS::cls_bucket_head(const RGWBucketInfo& bucket_info,
+int RGWSI_BucketIndex_RADOS::cls_bucket_head(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
int shard_id,
vector<rgw_bucket_dir_header> *headers,
map<int, string> *bucket_instance_ids,
{
RGWSI_RADOS::Pool index_pool;
map<int, string> oids;
- int r = open_bucket_index(bucket_info, shard_id, &index_pool, &oids, bucket_instance_ids);
+ int r = open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &oids, bucket_instance_ids);
if (r < 0)
return r;
}
-int RGWSI_BucketIndex_RADOS::init_index(RGWBucketInfo& bucket_info)
+int RGWSI_BucketIndex_RADOS::init_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info)
{
RGWSI_RADOS::Pool index_pool;
string dir_oid = dir_oid_prefix;
- int r = open_bucket_index_pool(bucket_info, &index_pool);
+ int r = open_bucket_index_pool(dpp, bucket_info, &index_pool);
if (r < 0) {
return r;
}
cct->_conf->rgw_bucket_index_max_aio)();
}
-int RGWSI_BucketIndex_RADOS::clean_index(RGWBucketInfo& bucket_info)
+int RGWSI_BucketIndex_RADOS::clean_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info)
{
RGWSI_RADOS::Pool index_pool;
std::string dir_oid = dir_oid_prefix;
- int r = open_bucket_index_pool(bucket_info, &index_pool);
+ int r = open_bucket_index_pool(dpp, bucket_info, &index_pool);
if (r < 0) {
return r;
}
cct->_conf->rgw_bucket_index_max_aio)();
}
-int RGWSI_BucketIndex_RADOS::read_stats(const RGWBucketInfo& bucket_info,
+int RGWSI_BucketIndex_RADOS::read_stats(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
RGWBucketEnt *result,
optional_yield y)
{
vector<rgw_bucket_dir_header> headers;
result->bucket = bucket_info.bucket;
- int r = cls_bucket_head(bucket_info, RGW_NO_SHARD, &headers, nullptr, y);
+ int r = cls_bucket_head(dpp, bucket_info, RGW_NO_SHARD, &headers, nullptr, y);
if (r < 0) {
return r;
}
return 0;
}
-int RGWSI_BucketIndex_RADOS::get_reshard_status(const RGWBucketInfo& bucket_info, list<cls_rgw_bucket_instance_entry> *status)
+int RGWSI_BucketIndex_RADOS::get_reshard_status(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, list<cls_rgw_bucket_instance_entry> *status)
{
map<int, string> bucket_objs;
RGWSI_RADOS::Pool index_pool;
- int r = open_bucket_index(bucket_info,
+ int r = open_bucket_index(dpp, bucket_info,
std::nullopt,
&index_pool,
&bucket_objs,
int ret = cls_rgw_get_bucket_resharding(index_pool.ioctx(), i.second, &entry);
if (ret < 0 && ret != -ENOENT) {
- lderr(cct) << "ERROR: " << __func__ << ": cls_rgw_get_bucket_resharding() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: " << __func__ << ": cls_rgw_get_bucket_resharding() returned ret=" << ret << dendl;
return ret;
}
int ret;
if (!new_sync_enabled) {
- ret = svc.bilog->log_stop(info, -1);
+ ret = svc.bilog->log_stop(dpp, info, -1);
} else {
- ret = svc.bilog->log_start(info, -1);
+ ret = svc.bilog->log_start(dpp, info, -1);
}
if (ret < 0) {
- lderr(cct) << "ERROR: failed writing bilog (bucket=" << info.bucket << "); ret=" << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed writing bilog (bucket=" << info.bucket << "); ret=" << ret << dendl;
return ret;
}
{
friend class RGWSI_BILog_RADOS;
- int open_pool(const rgw_pool& pool,
+ int open_pool(const DoutPrefixProvider *dpp,
+ const rgw_pool& pool,
RGWSI_RADOS::Pool *index_pool,
bool mostly_omap);
- int open_bucket_index_pool(const RGWBucketInfo& bucket_info,
+ int open_bucket_index_pool(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
RGWSI_RADOS::Pool *index_pool);
- int open_bucket_index_base(const RGWBucketInfo& bucket_info,
+ int open_bucket_index_base(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
RGWSI_RADOS::Pool *index_pool,
string *bucket_oid_base);
uint32_t num_shards, rgw::BucketHashType hash_type,
string *bucket_obj, int *shard_id);
- int cls_bucket_head(const RGWBucketInfo& bucket_info,
+ int cls_bucket_head(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
int shard_id,
vector<rgw_bucket_dir_header> *headers,
map<int, string> *bucket_instance_ids,
return rgw_shards_mod(sid2, num_shards);
}
- int init_index(RGWBucketInfo& bucket_info);
- int clean_index(RGWBucketInfo& bucket_info);
+ int init_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info);
+ int clean_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info);
/* RADOS specific */
- int read_stats(const RGWBucketInfo& bucket_info,
+ int read_stats(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
RGWBucketEnt *stats,
optional_yield y) override;
- int get_reshard_status(const RGWBucketInfo& bucket_info,
+ int get_reshard_status(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info,
std::list<cls_rgw_bucket_instance_entry> *status);
int handle_overwrite(const DoutPrefixProvider *dpp, const RGWBucketInfo& info,
const RGWBucketInfo& orig_info) override;
- int open_bucket_index_shard(const RGWBucketInfo& bucket_info,
+ int open_bucket_index_shard(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
const string& obj_key,
RGWSI_RADOS::Obj *bucket_obj,
int *shard_id);
- int open_bucket_index_shard(const RGWBucketInfo& bucket_info,
+ int open_bucket_index_shard(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
int shard_id,
const rgw::bucket_index_layout_generation& idx_layout,
RGWSI_RADOS::Obj *bucket_obj);
- int open_bucket_index(const RGWBucketInfo& bucket_info,
+ int open_bucket_index(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
RGWSI_RADOS::Pool *index_pool,
string *bucket_oid);
- int open_bucket_index(const RGWBucketInfo& bucket_info,
+ int open_bucket_index(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
std::optional<int> shard_id,
RGWSI_RADOS::Pool *index_pool,
map<int, string> *bucket_objs,
svc.bi = bi_rados_svc;
}
-int RGWSI_BILog_RADOS::log_trim(const RGWBucketInfo& bucket_info, int shard_id, string& start_marker, string& end_marker)
+int RGWSI_BILog_RADOS::log_trim(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id, string& start_marker, string& end_marker)
{
RGWSI_RADOS::Pool index_pool;
map<int, string> bucket_objs;
BucketIndexShardsManager start_marker_mgr;
BucketIndexShardsManager end_marker_mgr;
- int r = svc.bi->open_bucket_index(bucket_info, shard_id, &index_pool, &bucket_objs, nullptr);
+ int r = svc.bi->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &bucket_objs, nullptr);
if (r < 0) {
return r;
}
cct->_conf->rgw_bucket_index_max_aio)();
}
-int RGWSI_BILog_RADOS::log_start(const RGWBucketInfo& bucket_info, int shard_id)
+int RGWSI_BILog_RADOS::log_start(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id)
{
RGWSI_RADOS::Pool index_pool;
map<int, string> bucket_objs;
- int r = svc.bi->open_bucket_index(bucket_info, shard_id, &index_pool, &bucket_objs, nullptr);
+ int r = svc.bi->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &bucket_objs, nullptr);
if (r < 0)
return r;
return CLSRGWIssueResyncBucketBILog(index_pool.ioctx(), bucket_objs, cct->_conf->rgw_bucket_index_max_aio)();
}
-int RGWSI_BILog_RADOS::log_stop(const RGWBucketInfo& bucket_info, int shard_id)
+int RGWSI_BILog_RADOS::log_stop(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id)
{
RGWSI_RADOS::Pool index_pool;
map<int, string> bucket_objs;
- int r = svc.bi->open_bucket_index(bucket_info, shard_id, &index_pool, &bucket_objs, nullptr);
+ int r = svc.bi->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &bucket_objs, nullptr);
if (r < 0)
return r;
}
}
-int RGWSI_BILog_RADOS::log_list(const RGWBucketInfo& bucket_info, int shard_id, string& marker, uint32_t max,
+int RGWSI_BILog_RADOS::log_list(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id, string& marker, uint32_t max,
std::list<rgw_bi_log_entry>& result, bool *truncated)
{
- ldout(cct, 20) << __func__ << ": " << bucket_info.bucket << " marker " << marker << " shard_id=" << shard_id << " max " << max << dendl;
+ ldpp_dout(dpp, 20) << __func__ << ": " << bucket_info.bucket << " marker " << marker << " shard_id=" << shard_id << " max " << max << dendl;
result.clear();
RGWSI_RADOS::Pool index_pool;
map<int, string> oids;
map<int, cls_rgw_bi_log_list_ret> bi_log_lists;
- int r = svc.bi->open_bucket_index(bucket_info, shard_id, &index_pool, &oids, nullptr);
+ int r = svc.bi->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &oids, nullptr);
if (r < 0)
return r;
return 0;
}
-int RGWSI_BILog_RADOS::get_log_status(const RGWBucketInfo& bucket_info,
+int RGWSI_BILog_RADOS::get_log_status(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
int shard_id,
map<int, string> *markers,
optional_yield y)
{
vector<rgw_bucket_dir_header> headers;
map<int, string> bucket_instance_ids;
- int r = svc.bi->cls_bucket_head(bucket_info, shard_id, &headers, &bucket_instance_ids, y);
+ int r = svc.bi->cls_bucket_head(dpp, bucket_info, shard_id, &headers, &bucket_instance_ids, y);
if (r < 0)
return r;
void init(RGWSI_BucketIndex_RADOS *bi_rados_svc);
- int log_start(const RGWBucketInfo& bucket_info, int shard_id);
- int log_stop(const RGWBucketInfo& bucket_info, int shard_id);
+ int log_start(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id);
+ int log_stop(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id);
- int log_trim(const RGWBucketInfo& bucket_info,
+ int log_trim(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
int shard_id,
std::string& start_marker,
std::string& end_marker);
- int log_list(const RGWBucketInfo& bucket_info,
+ int log_list(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
int shard_id,
std::string& marker,
uint32_t max,
std::list<rgw_bi_log_entry>& result,
bool *truncated);
- int get_log_status(const RGWBucketInfo& bucket_info,
+ int get_log_status(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
int shard_id,
map<int, string> *markers,
optional_yield y);
}
/* chain to only bucket instance and *not* bucket entrypoint */
- if (!binfo_cache->put(svc.cache, cache_key, &e, {&ci})) {
+ if (!binfo_cache->put(dpp, svc.cache, cache_key, &e, {&ci})) {
ldpp_dout(dpp, 20) << "couldn't put binfo cache entry, might have raced with data changes" << dendl;
}
*pattrs = e.attrs;
/* chain to both bucket entry point and bucket instance */
- if (!binfo_cache->put(svc.cache, cache_key, &e, {&entry_cache_info, &cache_info})) {
+ if (!binfo_cache->put(dpp, svc.cache, cache_key, &e, {&entry_cache_info, &cache_info})) {
ldpp_dout(dpp, 20) << "couldn't put binfo cache entry, might have raced with data changes" << dendl;
}
int ret = svc.meta_be->put(ctx.get(), key, params, &info.objv_tracker, y, dpp);
if (ret >= 0) {
- int r = svc.bucket_sync->handle_bi_update(info,
+ int r = svc.bucket_sync->handle_bi_update(dpp, info,
orig_info.value_or(nullptr),
y);
if (r < 0) {
const DoutPrefixProvider *dpp)
{
RGWSI_MBSObj_RemoveParams params;
- int ret = svc.meta_be->remove_entry(ctx.get(), key, params, objv_tracker, y);
+ int ret = svc.meta_be->remove_entry(dpp, ctx.get(), key, params, objv_tracker, y);
if (ret < 0 &&
ret != -ENOENT) {
return ret;
}
- int r = svc.bucket_sync->handle_bi_removal(info, y);
+ int r = svc.bucket_sync->handle_bi_removal(dpp, info, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to update bucket instance sync index: r=" << r << dendl;
/* returning success as index is just keeping hints, so will keep extra hints,
vector<rgw_bucket_dir_header> headers;
- int r = svc.bi->read_stats(bucket_info, ent, y);
+ int r = svc.bi->read_stats(dpp, bucket_info, ent, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): read_stats returned r=" << r << dendl;
return r;
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
- virtual int handle_bi_update(RGWBucketInfo& bucket_info,
+ virtual int handle_bi_update(const DoutPrefixProvider *dpp,
+ RGWBucketInfo& bucket_info,
RGWBucketInfo *orig_bucket_info,
optional_yield y) = 0;
- virtual int handle_bi_removal(const RGWBucketInfo& bucket_info,
+ virtual int handle_bi_removal(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
optional_yield y) = 0;
- virtual int get_bucket_sync_hints(const rgw_bucket& bucket,
+ virtual int get_bucket_sync_hints(const DoutPrefixProvider *dpp,
+ const rgw_bucket& bucket,
std::set<rgw_bucket> *sources,
std::set<rgw_bucket> *dests,
optional_yield y) = 0;
rgw_raw_obj get_dests_obj(const rgw_bucket& bucket) const;
template <typename C1, typename C2>
- int update_hints(const RGWBucketInfo& bucket_info,
+ int update_hints(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
C1& added_dests,
C2& removed_dests,
C1& added_sources,
e.handler.reset(zone_policy_handler->alloc_child(bucket_info, std::move(attrs)));
- r = e.handler->init(y);
+ r = e.handler->init(dpp, y);
if (r < 0) {
ldpp_dout(dpp, 20) << "ERROR: failed to init bucket sync policy handler: r=" << r << dendl;
return r;
return r;
}
- if (!sync_policy_cache->put(svc.cache, cache_key, &e, {&cache_info})) {
+ if (!sync_policy_cache->put(dpp, svc.cache, cache_key, &e, {&cache_info})) {
ldpp_dout(dpp, 20) << "couldn't put bucket_sync_policy cache entry, might have raced with data changes" << dendl;
}
}
template <typename C1, typename C2>
- int update(const rgw_bucket& entity,
+ int update(const DoutPrefixProvider *dpp,
+ const rgw_bucket& entity,
const RGWBucketInfo& info_source,
C1 *add,
C2 *remove,
C2 *remove,
single_instance_info *instance);
- int read(optional_yield y);
- int flush(optional_yield y);
+ int read(const DoutPrefixProvider *dpp, optional_yield y);
+ int flush(const DoutPrefixProvider *dpp, optional_yield y);
void invalidate() {
has_data = false;
WRITE_CLASS_ENCODER(RGWSI_BS_SObj_HintIndexObj::info_map)
template <typename C1, typename C2>
-int RGWSI_BS_SObj_HintIndexObj::update(const rgw_bucket& entity,
+int RGWSI_BS_SObj_HintIndexObj::update(const DoutPrefixProvider *dpp,
+ const rgw_bucket& entity,
const RGWBucketInfo& info_source,
C1 *add,
C2 *remove,
for (int i = 0; i < MAX_RETRIES; ++i) {
if (!has_data) {
- r = read(y);
+ r = read(dpp, y);
if (r < 0) {
- ldout(cct, 0) << "ERROR: cannot update hint index: failed to read: r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: cannot update hint index: failed to read: r=" << r << dendl;
return r;
}
}
info.instances.erase(entity);
}
- r = flush(y);
+ r = flush(dpp, y);
if (r >= 0) {
return 0;
}
if (r != -ECANCELED) {
- ldout(cct, 0) << "ERROR: failed to flush hint index: obj=" << obj << " r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to flush hint index: obj=" << obj << " r=" << r << dendl;
return r;
}
invalidate();
}
- ldout(cct, 0) << "ERROR: failed to flush hint index: too many retries (obj=" << obj << "), likely a bug" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to flush hint index: too many retries (obj=" << obj << "), likely a bug" << dendl;
return -EIO;
}
}
}
-int RGWSI_BS_SObj_HintIndexObj::read(optional_yield y) {
+int RGWSI_BS_SObj_HintIndexObj::read(const DoutPrefixProvider *dpp, optional_yield y) {
RGWObjVersionTracker _ot;
bufferlist bl;
int r = sysobj.rop()
.set_objv_tracker(&_ot) /* forcing read of current version */
- .read(&bl, y);
+ .read(dpp, &bl, y);
if (r < 0 && r != -ENOENT) {
- ldout(cct, 0) << "ERROR: failed reading data (obj=" << obj << "), r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed reading data (obj=" << obj << "), r=" << r << dendl;
return r;
}
decode(info, iter);
has_data = true;
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: " << __func__ << "(): failed to decode entries, ignoring" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to decode entries, ignoring" << dendl;
info.clear();
}
} else {
return 0;
}
-int RGWSI_BS_SObj_HintIndexObj::flush(optional_yield y) {
+int RGWSI_BS_SObj_HintIndexObj::flush(const DoutPrefixProvider *dpp, optional_yield y) {
int r;
if (!info.empty()) {
r = sysobj.wop()
.set_objv_tracker(&ot) /* forcing read of current version */
- .write(bl, y);
+ .write(dpp, bl, y);
} else { /* remove */
r = sysobj.wop()
.set_objv_tracker(&ot)
- .remove(y);
+ .remove(dpp, y);
}
if (r < 0) {
}
template <typename C1, typename C2>
-int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const RGWBucketInfo& bucket_info,
+int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
C1& added_dests,
C2& removed_dests,
C1& added_sources,
/* update our dests */
RGWSI_BS_SObj_HintIndexObj index(svc.sysobj,
get_dests_obj(bucket_info.bucket));
- int r = index.update(bucket_info.bucket,
+ int r = index.update(dpp, bucket_info.bucket,
bucket_info,
&added_dests,
&removed_dests,
y);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to update targets index for bucket=" << bucket_info.bucket << " r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << bucket_info.bucket << " r=" << r << dendl;
return r;
}
for (auto& dest_bucket : added_dests) {
RGWSI_BS_SObj_HintIndexObj dep_index(svc.sysobj,
get_sources_obj(dest_bucket));
- int r = dep_index.update(dest_bucket,
+ int r = dep_index.update(dpp, dest_bucket,
bucket_info,
&self_entity,
static_cast<C2 *>(nullptr),
y);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to update targets index for bucket=" << dest_bucket << " r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << dest_bucket << " r=" << r << dendl;
return r;
}
}
for (auto& dest_bucket : removed_dests) {
RGWSI_BS_SObj_HintIndexObj dep_index(svc.sysobj,
get_sources_obj(dest_bucket));
- int r = dep_index.update(dest_bucket,
+ int r = dep_index.update(dpp, dest_bucket,
bucket_info,
static_cast<C1 *>(nullptr),
&self_entity,
y);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to update targets index for bucket=" << dest_bucket << " r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << dest_bucket << " r=" << r << dendl;
return r;
}
}
RGWSI_BS_SObj_HintIndexObj index(svc.sysobj,
get_sources_obj(bucket_info.bucket));
/* update our sources */
- int r = index.update(bucket_info.bucket,
+ int r = index.update(dpp, bucket_info.bucket,
bucket_info,
&added_sources,
&removed_sources,
y);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to update targets index for bucket=" << bucket_info.bucket << " r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << bucket_info.bucket << " r=" << r << dendl;
return r;
}
for (auto& source_bucket : added_sources) {
RGWSI_BS_SObj_HintIndexObj dep_index(svc.sysobj,
get_dests_obj(source_bucket));
- int r = dep_index.update(source_bucket,
+ int r = dep_index.update(dpp, source_bucket,
bucket_info,
&self_entity,
static_cast<C2 *>(nullptr),
y);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to update targets index for bucket=" << source_bucket << " r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << source_bucket << " r=" << r << dendl;
return r;
}
}
for (auto& source_bucket : removed_sources) {
RGWSI_BS_SObj_HintIndexObj dep_index(svc.sysobj,
get_dests_obj(source_bucket));
- int r = dep_index.update(source_bucket,
+ int r = dep_index.update(dpp, source_bucket,
bucket_info,
static_cast<C1 *>(nullptr),
&self_entity,
y);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to update targets index for bucket=" << source_bucket << " r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << source_bucket << " r=" << r << dendl;
return r;
}
}
return 0;
}
-int RGWSI_Bucket_Sync_SObj::handle_bi_removal(const RGWBucketInfo& bucket_info,
+int RGWSI_Bucket_Sync_SObj::handle_bi_removal(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
optional_yield y)
{
std::set<rgw_bucket> sources_set;
std::vector<rgw_bucket> added_sources;
std::vector<rgw_bucket> added_dests;
- return hint_index_mgr->update_hints(bucket_info,
+ return hint_index_mgr->update_hints(dpp, bucket_info,
added_dests,
removed_dests,
added_sources,
y);
}
-int RGWSI_Bucket_Sync_SObj::handle_bi_update(RGWBucketInfo& bucket_info,
+int RGWSI_Bucket_Sync_SObj::handle_bi_update(const DoutPrefixProvider *dpp,
+ RGWBucketInfo& bucket_info,
RGWBucketInfo *orig_bucket_info,
optional_yield y)
{
std::vector<rgw_bucket> removed_sources;
std::vector<rgw_bucket> added_sources;
bool found = diff_sets(orig_sources, sources, &added_sources, &removed_sources);
- ldout(cct, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": orig_sources=" << orig_sources << " new_sources=" << sources << dendl;
- ldout(cct, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": potential sources added=" << added_sources << " removed=" << removed_sources << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": orig_sources=" << orig_sources << " new_sources=" << sources << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": potential sources added=" << added_sources << " removed=" << removed_sources << dendl;
std::vector<rgw_bucket> removed_dests;
std::vector<rgw_bucket> added_dests;
found = found || diff_sets(orig_dests, dests, &added_dests, &removed_dests);
- ldout(cct, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": orig_dests=" << orig_dests << " new_dests=" << dests << dendl;
- ldout(cct, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": potential dests added=" << added_dests << " removed=" << removed_dests << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": orig_dests=" << orig_dests << " new_dests=" << dests << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": potential dests added=" << added_dests << " removed=" << removed_dests << dendl;
if (!found) {
return 0;
}
- return hint_index_mgr->update_hints(bucket_info,
+ return hint_index_mgr->update_hints(dpp, bucket_info,
dests, /* set all dests, not just the ones that were added */
removed_dests,
sources, /* set all sources, not just that the ones that were added */
y);
}
-int RGWSI_Bucket_Sync_SObj::get_bucket_sync_hints(const rgw_bucket& bucket,
+int RGWSI_Bucket_Sync_SObj::get_bucket_sync_hints(const DoutPrefixProvider *dpp,
+ const rgw_bucket& bucket,
std::set<rgw_bucket> *sources,
std::set<rgw_bucket> *dests,
optional_yield y)
if (sources) {
RGWSI_BS_SObj_HintIndexObj index(svc.sysobj,
hint_index_mgr->get_sources_obj(bucket));
- int r = index.read(y);
+ int r = index.read(dpp, y);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to update sources index for bucket=" << bucket << " r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to read sources index for bucket=" << bucket << " r=" << r << dendl;
return r;
}
if (dests) {
RGWSI_BS_SObj_HintIndexObj index(svc.sysobj,
hint_index_mgr->get_dests_obj(bucket));
- int r = index.read(y);
+ int r = index.read(dpp, y);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to read targets index for bucket=" << bucket << " r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to read targets index for bucket=" << bucket << " r=" << r << dendl;
return r;
}
optional_yield y,
const DoutPrefixProvider *dpp);
- int handle_bi_update(RGWBucketInfo& bucket_info,
+ int handle_bi_update(const DoutPrefixProvider *dpp,
+ RGWBucketInfo& bucket_info,
RGWBucketInfo *orig_bucket_info,
optional_yield y) override;
- int handle_bi_removal(const RGWBucketInfo& bucket_info,
+ int handle_bi_removal(const DoutPrefixProvider *dpp,
+ const RGWBucketInfo& bucket_info,
optional_yield y) override;
- int get_bucket_sync_hints(const rgw_bucket& bucket,
+ int get_bucket_sync_hints(const DoutPrefixProvider *dpp,
+ const rgw_bucket& bucket,
std::set<rgw_bucket> *sources,
std::set<rgw_bucket> *dests,
optional_yield y) override;
return 0;
}
-int RGWSI_Cls::MFA::get_mfa_obj(const rgw_user& user, std::optional<RGWSI_RADOS::Obj> *obj)
+int RGWSI_Cls::MFA::get_mfa_obj(const DoutPrefixProvider *dpp, const rgw_user& user, std::optional<RGWSI_RADOS::Obj> *obj)
{
string oid = get_mfa_oid(user);
rgw_raw_obj o(zone_svc->get_zone_params().otp_pool, oid);
obj->emplace(rados_svc->obj(o));
- int r = (*obj)->open();
+ int r = (*obj)->open(dpp);
if (r < 0) {
- ldout(cct, 4) << "failed to open rados context for " << o << dendl;
+ ldpp_dout(dpp, 4) << "failed to open rados context for " << o << dendl;
return r;
}
return 0;
}
-int RGWSI_Cls::MFA::get_mfa_ref(const rgw_user& user, rgw_rados_ref *ref)
+int RGWSI_Cls::MFA::get_mfa_ref(const DoutPrefixProvider *dpp, const rgw_user& user, rgw_rados_ref *ref)
{
std::optional<RGWSI_RADOS::Obj> obj;
- int r = get_mfa_obj(user, &obj);
+ int r = get_mfa_obj(dpp, user, &obj);
if (r < 0) {
return r;
}
return 0;
}
-int RGWSI_Cls::MFA::check_mfa(const rgw_user& user, const string& otp_id, const string& pin, optional_yield y)
+int RGWSI_Cls::MFA::check_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const string& otp_id, const string& pin, optional_yield y)
{
rgw_rados_ref ref;
- int r = get_mfa_ref(user, &ref);
+ int r = get_mfa_ref(dpp, user, &ref);
if (r < 0) {
return r;
}
if (r < 0)
return r;
- ldout(cct, 20) << "OTP check, otp_id=" << otp_id << " result=" << (int)result.result << dendl;
+ ldpp_dout(dpp, 20) << "OTP check, otp_id=" << otp_id << " result=" << (int)result.result << dendl;
return (result.result == rados::cls::otp::OTP_CHECK_SUCCESS ? 0 : -EACCES);
}
op->mtime2(&mtime_ts);
}
-int RGWSI_Cls::MFA::create_mfa(const rgw_user& user, const rados::cls::otp::otp_info_t& config,
+int RGWSI_Cls::MFA::create_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const rados::cls::otp::otp_info_t& config,
RGWObjVersionTracker *objv_tracker, const ceph::real_time& mtime, optional_yield y)
{
std::optional<RGWSI_RADOS::Obj> obj;
- int r = get_mfa_obj(user, &obj);
+ int r = get_mfa_obj(dpp, user, &obj);
if (r < 0) {
return r;
}
librados::ObjectWriteOperation op;
prepare_mfa_write(&op, objv_tracker, mtime);
rados::cls::otp::OTP::create(&op, config);
- r = obj->operate(&op, y);
+ r = obj->operate(dpp, &op, y);
if (r < 0) {
- ldout(cct, 20) << "OTP create, otp_id=" << config.id << " result=" << (int)r << dendl;
+ ldpp_dout(dpp, 20) << "OTP create, otp_id=" << config.id << " result=" << (int)r << dendl;
return r;
}
return 0;
}
-int RGWSI_Cls::MFA::remove_mfa(const rgw_user& user, const string& id,
+int RGWSI_Cls::MFA::remove_mfa(const DoutPrefixProvider *dpp,
+ const rgw_user& user, const string& id,
RGWObjVersionTracker *objv_tracker,
const ceph::real_time& mtime,
optional_yield y)
{
std::optional<RGWSI_RADOS::Obj> obj;
- int r = get_mfa_obj(user, &obj);
+ int r = get_mfa_obj(dpp, user, &obj);
if (r < 0) {
return r;
}
librados::ObjectWriteOperation op;
prepare_mfa_write(&op, objv_tracker, mtime);
rados::cls::otp::OTP::remove(&op, id);
- r = obj->operate(&op, y);
+ r = obj->operate(dpp, &op, y);
if (r < 0) {
- ldout(cct, 20) << "OTP remove, otp_id=" << id << " result=" << (int)r << dendl;
+ ldpp_dout(dpp, 20) << "OTP remove, otp_id=" << id << " result=" << (int)r << dendl;
return r;
}
return 0;
}
-int RGWSI_Cls::MFA::get_mfa(const rgw_user& user, const string& id, rados::cls::otp::otp_info_t *result,
+int RGWSI_Cls::MFA::get_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const string& id, rados::cls::otp::otp_info_t *result,
optional_yield y)
{
rgw_rados_ref ref;
- int r = get_mfa_ref(user, &ref);
+ int r = get_mfa_ref(dpp, user, &ref);
if (r < 0) {
return r;
}
return 0;
}
-int RGWSI_Cls::MFA::list_mfa(const rgw_user& user, list<rados::cls::otp::otp_info_t> *result,
+int RGWSI_Cls::MFA::list_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, list<rados::cls::otp::otp_info_t> *result,
optional_yield y)
{
rgw_rados_ref ref;
- int r = get_mfa_ref(user, &ref);
+ int r = get_mfa_ref(dpp, user, &ref);
if (r < 0) {
return r;
}
return 0;
}
-int RGWSI_Cls::MFA::otp_get_current_time(const rgw_user& user, ceph::real_time *result,
+int RGWSI_Cls::MFA::otp_get_current_time(const DoutPrefixProvider *dpp, const rgw_user& user, ceph::real_time *result,
optional_yield y)
{
rgw_rados_ref ref;
- int r = get_mfa_ref(user, &ref);
+ int r = get_mfa_ref(dpp, user, &ref);
if (r < 0) {
return r;
}
return 0;
}
-int RGWSI_Cls::MFA::set_mfa(const string& oid, const list<rados::cls::otp::otp_info_t>& entries,
+int RGWSI_Cls::MFA::set_mfa(const DoutPrefixProvider *dpp, const string& oid, const list<rados::cls::otp::otp_info_t>& entries,
bool reset_obj, RGWObjVersionTracker *objv_tracker,
const real_time& mtime,
optional_yield y)
{
rgw_raw_obj o(zone_svc->get_zone_params().otp_pool, oid);
auto obj = rados_svc->obj(o);
- int r = obj.open();
+ int r = obj.open(dpp);
if (r < 0) {
- ldout(cct, 4) << "failed to open rados context for " << o << dendl;
+ ldpp_dout(dpp, 4) << "failed to open rados context for " << o << dendl;
return r;
}
librados::ObjectWriteOperation op;
}
prepare_mfa_write(&op, objv_tracker, mtime);
rados::cls::otp::OTP::set(&op, entries);
- r = obj.operate(&op, y);
+ r = obj.operate(dpp, &op, y);
if (r < 0) {
- ldout(cct, 20) << "OTP set entries.size()=" << entries.size() << " result=" << (int)r << dendl;
+ ldpp_dout(dpp, 20) << "OTP set entries.size()=" << entries.size() << " result=" << (int)r << dendl;
return r;
}
return 0;
}
-int RGWSI_Cls::MFA::list_mfa(const string& oid, list<rados::cls::otp::otp_info_t> *result,
+int RGWSI_Cls::MFA::list_mfa(const DoutPrefixProvider *dpp, const string& oid, list<rados::cls::otp::otp_info_t> *result,
RGWObjVersionTracker *objv_tracker, ceph::real_time *pmtime,
optional_yield y)
{
rgw_raw_obj o(zone_svc->get_zone_params().otp_pool, oid);
auto obj = rados_svc->obj(o);
- int r = obj.open();
+ int r = obj.open(dpp);
if (r < 0) {
- ldout(cct, 4) << "failed to open rados context for " << o << dendl;
+ ldpp_dout(dpp, 4) << "failed to open rados context for " << o << dendl;
return r;
}
auto& ref = obj.get_ref();
cls_log_add_prepare_entry(entry, utime_t(ut), section, key, bl);
}
-int RGWSI_Cls::TimeLog::init_obj(const string& oid, RGWSI_RADOS::Obj& obj)
+int RGWSI_Cls::TimeLog::init_obj(const DoutPrefixProvider *dpp, const string& oid, RGWSI_RADOS::Obj& obj)
{
rgw_raw_obj o(zone_svc->get_zone_params().log_pool, oid);
obj = rados_svc->obj(o);
- return obj.open();
+ return obj.open(dpp);
}
-int RGWSI_Cls::TimeLog::add(const string& oid,
+int RGWSI_Cls::TimeLog::add(const DoutPrefixProvider *dpp,
+ const string& oid,
const real_time& ut,
const string& section,
const string& key,
{
RGWSI_RADOS::Obj obj;
- int r = init_obj(oid, obj);
+ int r = init_obj(dpp, oid, obj);
if (r < 0) {
return r;
}
utime_t t(ut);
cls_log_add(op, t, section, key, bl);
- return obj.operate(&op, y);
+ return obj.operate(dpp, &op, y);
}
-int RGWSI_Cls::TimeLog::add(const string& oid,
+int RGWSI_Cls::TimeLog::add(const DoutPrefixProvider *dpp,
+ const string& oid,
std::list<cls_log_entry>& entries,
librados::AioCompletion *completion,
bool monotonic_inc,
{
RGWSI_RADOS::Obj obj;
- int r = init_obj(oid, obj);
+ int r = init_obj(dpp, oid, obj);
if (r < 0) {
return r;
}
cls_log_add(op, entries, monotonic_inc);
if (!completion) {
- r = obj.operate(&op, y);
+ r = obj.operate(dpp, &op, y);
} else {
r = obj.aio_operate(completion, &op);
}
return r;
}
-int RGWSI_Cls::TimeLog::list(const string& oid,
+int RGWSI_Cls::TimeLog::list(const DoutPrefixProvider *dpp,
+ const string& oid,
const real_time& start_time,
const real_time& end_time,
int max_entries, std::list<cls_log_entry>& entries,
{
RGWSI_RADOS::Obj obj;
- int r = init_obj(oid, obj);
+ int r = init_obj(dpp, oid, obj);
if (r < 0) {
return r;
}
bufferlist obl;
- int ret = obj.operate(&op, &obl, y);
+ int ret = obj.operate(dpp, &op, &obl, y);
if (ret < 0)
return ret;
return 0;
}
-int RGWSI_Cls::TimeLog::info(const string& oid,
+int RGWSI_Cls::TimeLog::info(const DoutPrefixProvider *dpp,
+ const string& oid,
cls_log_header *header,
optional_yield y)
{
RGWSI_RADOS::Obj obj;
- int r = init_obj(oid, obj);
+ int r = init_obj(dpp, oid, obj);
if (r < 0) {
return r;
}
bufferlist obl;
- int ret = obj.operate(&op, &obl, y);
+ int ret = obj.operate(dpp, &op, &obl, y);
if (ret < 0)
return ret;
return 0;
}
-int RGWSI_Cls::TimeLog::info_async(RGWSI_RADOS::Obj& obj,
+int RGWSI_Cls::TimeLog::info_async(const DoutPrefixProvider *dpp,
+ RGWSI_RADOS::Obj& obj,
const string& oid,
cls_log_header *header,
librados::AioCompletion *completion)
{
- int r = init_obj(oid, obj);
+ int r = init_obj(dpp, oid, obj);
if (r < 0) {
return r;
}
return 0;
}
-int RGWSI_Cls::TimeLog::trim(const string& oid,
+int RGWSI_Cls::TimeLog::trim(const DoutPrefixProvider *dpp,
+ const string& oid,
const real_time& start_time,
const real_time& end_time,
const string& from_marker,
{
RGWSI_RADOS::Obj obj;
- int r = init_obj(oid, obj);
+ int r = init_obj(dpp, oid, obj);
if (r < 0) {
return r;
}
cls_log_trim(op, st, et, from_marker, to_marker);
if (!completion) {
- r = obj.operate(&op, y);
+ r = obj.operate(dpp, &op, y);
} else {
r = obj.aio_operate(completion, &op);
}
return r;
}
-int RGWSI_Cls::Lock::lock_exclusive(const rgw_pool& pool,
+int RGWSI_Cls::Lock::lock_exclusive(const DoutPrefixProvider *dpp,
+ const rgw_pool& pool,
const string& oid,
timespan& duration,
string& zone_id,
std::optional<string> lock_name)
{
auto p = rados_svc->pool(pool);
- int r = p.open();
+ int r = p.open(dpp);
if (r < 0) {
return r;
}
return l.lock_exclusive(&p.ioctx(), oid);
}
-int RGWSI_Cls::Lock::unlock(const rgw_pool& pool,
+int RGWSI_Cls::Lock::unlock(const DoutPrefixProvider *dpp,
+ const rgw_pool& pool,
const string& oid,
string& zone_id,
string& owner_id,
std::optional<string> lock_name)
{
auto p = rados_svc->pool(pool);
- int r = p.open();
+ int r = p.open(dpp);
if (r < 0) {
return r;
}
public:
class MFA : public ClsSubService {
- int get_mfa_obj(const rgw_user& user, std::optional<RGWSI_RADOS::Obj> *obj);
- int get_mfa_ref(const rgw_user& user, rgw_rados_ref *ref);
+ int get_mfa_obj(const DoutPrefixProvider *dpp, const rgw_user& user, std::optional<RGWSI_RADOS::Obj> *obj);
+ int get_mfa_ref(const DoutPrefixProvider *dpp, const rgw_user& user, rgw_rados_ref *ref);
void prepare_mfa_write(librados::ObjectWriteOperation *op,
RGWObjVersionTracker *objv_tracker,
return string("user:") + user.to_str();
}
- int check_mfa(const rgw_user& user, const string& otp_id, const string& pin, optional_yield y);
- int create_mfa(const rgw_user& user, const rados::cls::otp::otp_info_t& config,
+ int check_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const string& otp_id, const string& pin, optional_yield y);
+ int create_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const rados::cls::otp::otp_info_t& config,
RGWObjVersionTracker *objv_tracker, const ceph::real_time& mtime, optional_yield y);
- int remove_mfa(const rgw_user& user, const string& id,
+ int remove_mfa(const DoutPrefixProvider *dpp,
+ const rgw_user& user, const string& id,
RGWObjVersionTracker *objv_tracker,
const ceph::real_time& mtime,
optional_yield y);
- int get_mfa(const rgw_user& user, const string& id, rados::cls::otp::otp_info_t *result, optional_yield y);
- int list_mfa(const rgw_user& user, list<rados::cls::otp::otp_info_t> *result, optional_yield y);
- int otp_get_current_time(const rgw_user& user, ceph::real_time *result, optional_yield y);
- int set_mfa(const string& oid, const list<rados::cls::otp::otp_info_t>& entries,
+ int get_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const string& id, rados::cls::otp::otp_info_t *result, optional_yield y);
+ int list_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, list<rados::cls::otp::otp_info_t> *result, optional_yield y);
+ int otp_get_current_time(const DoutPrefixProvider *dpp, const rgw_user& user, ceph::real_time *result, optional_yield y);
+ int set_mfa(const DoutPrefixProvider *dpp, const string& oid, const list<rados::cls::otp::otp_info_t>& entries,
bool reset_obj, RGWObjVersionTracker *objv_tracker,
const real_time& mtime, optional_yield y);
- int list_mfa(const string& oid, list<rados::cls::otp::otp_info_t> *result,
+ int list_mfa(const DoutPrefixProvider *dpp, const string& oid, list<rados::cls::otp::otp_info_t> *result,
RGWObjVersionTracker *objv_tracker, ceph::real_time *pmtime, optional_yield y);
} mfa;
class TimeLog : public ClsSubService {
- int init_obj(const string& oid, RGWSI_RADOS::Obj& obj);
+ int init_obj(const DoutPrefixProvider *dpp, const string& oid, RGWSI_RADOS::Obj& obj);
public:
TimeLog(CephContext *cct): ClsSubService(cct) {}
const string& section,
const string& key,
bufferlist& bl);
- int add(const string& oid,
+ int add(const DoutPrefixProvider *dpp,
+ const string& oid,
const real_time& ut,
const string& section,
const string& key,
bufferlist& bl,
optional_yield y);
- int add(const string& oid,
+ int add(const DoutPrefixProvider *dpp,
+ const string& oid,
std::list<cls_log_entry>& entries,
librados::AioCompletion *completion,
bool monotonic_inc,
optional_yield y);
- int list(const string& oid,
+ int list(const DoutPrefixProvider *dpp,
+ const string& oid,
const real_time& start_time,
const real_time& end_time,
int max_entries, list<cls_log_entry>& entries,
string *out_marker,
bool *truncated,
optional_yield y);
- int info(const string& oid,
+ int info(const DoutPrefixProvider *dpp,
+ const string& oid,
cls_log_header *header,
optional_yield y);
- int info_async(RGWSI_RADOS::Obj& obj,
+ int info_async(const DoutPrefixProvider *dpp,
+ RGWSI_RADOS::Obj& obj,
const string& oid,
cls_log_header *header,
librados::AioCompletion *completion);
- int trim(const string& oid,
+ int trim(const DoutPrefixProvider *dpp,
+ const string& oid,
const real_time& start_time,
const real_time& end_time,
const string& from_marker,
int init_obj(const string& oid, RGWSI_RADOS::Obj& obj);
public:
Lock(CephContext *cct): ClsSubService(cct) {}
- int lock_exclusive(const rgw_pool& pool,
+ int lock_exclusive(const DoutPrefixProvider *dpp,
+ const rgw_pool& pool,
const string& oid,
timespan& duration,
string& zone_id,
string& owner_id,
std::optional<string> lock_name = std::nullopt);
- int unlock(const rgw_pool& pool,
+ int unlock(const DoutPrefixProvider *dpp,
+ const rgw_pool& pool,
const string& oid,
string& zone_id,
string& owner_id,
/* bad history object, remove it */
rgw_raw_obj obj(pool, oid);
auto sysobj = obj_ctx.get_obj(obj);
- ret = sysobj.wop().remove(y);
+ ret = sysobj.wop().remove(dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: meta history is empty, but cannot remove it (" << cpp_strerror(-ret) << ")" << dendl;
return ret;
return 0;
}
-int RGWSI_MDLog::write_history(const RGWMetadataLogHistory& state,
+int RGWSI_MDLog::write_history(const DoutPrefixProvider *dpp,
+ const RGWMetadataLogHistory& state,
RGWObjVersionTracker *objv_tracker,
optional_yield y, bool exclusive)
{
auto& pool = svc.zone->get_zone_params().log_pool;
const auto& oid = RGWMetadataLogHistory::oid;
auto obj_ctx = svc.sysobj->init_obj_ctx();
- return rgw_put_system_obj(obj_ctx, pool, oid, bl,
+ return rgw_put_system_obj(dpp, obj_ctx, pool, oid, bl,
exclusive, objv_tracker, real_time{}, y);
}
/// read the mdlog history and use it to initialize the given cursor
class ReadHistoryCR : public RGWCoroutine {
+ const DoutPrefixProvider *dpp;
Svc svc;
Cursor *cursor;
RGWObjVersionTracker *objv_tracker;
RGWAsyncRadosProcessor *async_processor;
public:
- ReadHistoryCR(const Svc& svc,
+ ReadHistoryCR(const DoutPrefixProvider *dpp,
+ const Svc& svc,
Cursor *cursor,
RGWObjVersionTracker *objv_tracker)
- : RGWCoroutine(svc.zone->ctx()), svc(svc),
+ : RGWCoroutine(svc.zone->ctx()), dpp(dpp), svc(svc),
cursor(cursor),
objv_tracker(objv_tracker),
async_processor(svc.rados->get_async_processor())
{}
- int operate() {
+ int operate(const DoutPrefixProvider *dpp) {
reenter(this) {
yield {
rgw_raw_obj obj{svc.zone->get_zone_params().log_pool,
constexpr bool empty_on_enoent = false;
using ReadCR = RGWSimpleRadosReadCR<RGWMetadataLogHistory>;
- call(new ReadCR(async_processor, svc.sysobj, obj,
+ call(new ReadCR(dpp, async_processor, svc.sysobj, obj,
&state, empty_on_enoent, objv_tracker));
}
if (retcode < 0) {
- ldout(cct, 1) << "failed to read mdlog history: "
+ ldpp_dout(dpp, 1) << "failed to read mdlog history: "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
return set_cr_error(cursor->get_error());
}
- ldout(cct, 10) << "read mdlog history with oldest period id="
+ ldpp_dout(dpp, 10) << "read mdlog history with oldest period id="
<< state.oldest_period_id << " realm_epoch="
<< state.oldest_realm_epoch << dendl;
return set_cr_done();
/// write the given cursor to the mdlog history
class WriteHistoryCR : public RGWCoroutine {
+ const DoutPrefixProvider *dpp;
Svc svc;
Cursor cursor;
RGWObjVersionTracker *objv;
RGWAsyncRadosProcessor *async_processor;
public:
- WriteHistoryCR(Svc& svc,
+ WriteHistoryCR(const DoutPrefixProvider *dpp,
+ Svc& svc,
const Cursor& cursor,
RGWObjVersionTracker *objv)
- : RGWCoroutine(svc.zone->ctx()), svc(svc),
+ : RGWCoroutine(svc.zone->ctx()), dpp(dpp), svc(svc),
cursor(cursor), objv(objv),
async_processor(svc.rados->get_async_processor())
{}
- int operate() {
+ int operate(const DoutPrefixProvider *dpp) {
reenter(this) {
state.oldest_period_id = cursor.get_period().get_id();
state.oldest_realm_epoch = cursor.get_epoch();
RGWMetadataLogHistory::oid};
using WriteCR = RGWSimpleRadosWriteCR<RGWMetadataLogHistory>;
- call(new WriteCR(async_processor, svc.sysobj, obj, state, objv));
+ call(new WriteCR(dpp, async_processor, svc.sysobj, obj, state, objv));
}
if (retcode < 0) {
- ldout(cct, 1) << "failed to write mdlog history: "
+ ldpp_dout(dpp, 1) << "failed to write mdlog history: "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
- ldout(cct, 10) << "wrote mdlog history with oldest period id="
+ ldpp_dout(dpp, 10) << "wrote mdlog history with oldest period id="
<< state.oldest_period_id << " realm_epoch="
<< state.oldest_realm_epoch << dendl;
return set_cr_done();
/// update the mdlog history to reflect trimmed logs
class TrimHistoryCR : public RGWCoroutine {
+ const DoutPrefixProvider *dpp;
Svc svc;
const Cursor cursor; //< cursor to trimmed period
RGWObjVersionTracker *objv; //< to prevent racing updates
Cursor existing; //< existing cursor read from disk
public:
- TrimHistoryCR(const Svc& svc, Cursor cursor, RGWObjVersionTracker *objv)
- : RGWCoroutine(svc.zone->ctx()), svc(svc),
+ TrimHistoryCR(const DoutPrefixProvider *dpp, const Svc& svc, Cursor cursor, RGWObjVersionTracker *objv)
+ : RGWCoroutine(svc.zone->ctx()), dpp(dpp), svc(svc),
cursor(cursor), objv(objv), next(cursor) {
next.next(); // advance past cursor
}
- int operate() {
+ int operate(const DoutPrefixProvider *dpp) {
reenter(this) {
// read an existing history, and write the new history if it's newer
- yield call(new ReadHistoryCR(svc, &existing, objv));
+ yield call(new ReadHistoryCR(dpp, svc, &existing, objv));
if (retcode < 0) {
return set_cr_error(retcode);
}
// reject older trims with ECANCELED
if (cursor.get_epoch() < existing.get_epoch()) {
- ldout(cct, 4) << "found oldest log epoch=" << existing.get_epoch()
+ ldpp_dout(dpp, 4) << "found oldest log epoch=" << existing.get_epoch()
<< ", rejecting trim at epoch=" << cursor.get_epoch() << dendl;
return set_cr_error(-ECANCELED);
}
// overwrite with updated history
- yield call(new WriteHistoryCR(svc, next, objv));
+ yield call(new WriteHistoryCR(dpp, svc, next, objv));
if (retcode < 0) {
return set_cr_error(retcode);
}
// traverse all the way back to the beginning of the period history, and
// return a cursor to the first period in a fully attached history
-Cursor RGWSI_MDLog::find_oldest_period(optional_yield y)
+Cursor RGWSI_MDLog::find_oldest_period(const DoutPrefixProvider *dpp, optional_yield y)
{
auto cursor = period_history->get_current();
auto& predecessor = cursor.get_period().get_predecessor();
if (predecessor.empty()) {
// this is the first period, so our logs must start here
- ldout(cct, 10) << "find_oldest_period returning first "
+ ldpp_dout(dpp, 10) << "find_oldest_period returning first "
"period " << cursor.get_period().get_id() << dendl;
return cursor;
}
// pull the predecessor and add it to our history
RGWPeriod period;
- int r = period_puller->pull(predecessor, period, y);
+ int r = period_puller->pull(dpp, predecessor, period, y);
if (r < 0) {
return cursor;
}
if (!prev) {
return prev;
}
- ldout(cct, 20) << "find_oldest_period advancing to "
+ ldpp_dout(dpp, 20) << "find_oldest_period advancing to "
"predecessor period " << predecessor << dendl;
ceph_assert(cursor.has_prev());
}
cursor.prev();
}
- ldout(cct, 10) << "find_oldest_period returning empty cursor" << dendl;
+ ldpp_dout(dpp, 10) << "find_oldest_period returning empty cursor" << dendl;
return cursor;
}
if (ret == -ENOENT) {
// initialize the mdlog history and write it
ldpp_dout(dpp, 10) << "initializing mdlog history" << dendl;
- auto cursor = find_oldest_period(y);
+ auto cursor = find_oldest_period(dpp, y);
if (!cursor) {
return cursor;
}
state.oldest_period_id = cursor.get_period().get_id();
constexpr bool exclusive = true; // don't overwrite
- int ret = write_history(state, &objv, y, exclusive);
+ int ret = write_history(dpp, state, &objv, y, exclusive);
if (ret < 0 && ret != -EEXIST) {
ldpp_dout(dpp, 1) << "failed to write mdlog history: "
<< cpp_strerror(ret) << dendl;
if (cursor) {
return cursor;
} else {
- cursor = find_oldest_period(y);
+ cursor = find_oldest_period(dpp, y);
state.oldest_realm_epoch = cursor.get_epoch();
state.oldest_period_id = cursor.get_period().get_id();
- ldout(cct, 10) << "rewriting mdlog history" << dendl;
- ret = write_history(state, &objv, y);
+ ldpp_dout(dpp, 10) << "rewriting mdlog history" << dendl;
+ ret = write_history(dpp, state, &objv, y);
if (ret < 0 && ret != -ECANCELED) {
- ldout(cct, 1) << "failed to write mdlog history: "
+ ldpp_dout(dpp, 1) << "failed to write mdlog history: "
<< cpp_strerror(ret) << dendl;
return Cursor{ret};
}
// pull the oldest period by id
RGWPeriod period;
- ret = period_puller->pull(state.oldest_period_id, period, y);
+ ret = period_puller->pull(dpp, state.oldest_period_id, period, y);
if (ret < 0) {
- ldout(cct, 1) << "failed to read period id=" << state.oldest_period_id
+ ldpp_dout(dpp, 1) << "failed to read period id=" << state.oldest_period_id
<< " for mdlog history: " << cpp_strerror(ret) << dendl;
return Cursor{ret};
}
// verify its realm_epoch
if (period.get_realm_epoch() != state.oldest_realm_epoch) {
- ldout(cct, 1) << "inconsistent mdlog history: read period id="
+ ldpp_dout(dpp, 1) << "inconsistent mdlog history: read period id="
<< period.get_id() << " with realm_epoch=" << period.get_realm_epoch()
<< ", expected realm_epoch=" << state.oldest_realm_epoch << dendl;
return Cursor{-EINVAL};
}
// attach the period to our history
- return period_history->attach(std::move(period), y);
+ return period_history->attach(dpp, std::move(period), y);
}
Cursor RGWSI_MDLog::read_oldest_log_period(optional_yield y, const DoutPrefixProvider *dpp) const
return period_history->lookup(state.oldest_realm_epoch);
}
-RGWCoroutine* RGWSI_MDLog::read_oldest_log_period_cr(Cursor *period,
- RGWObjVersionTracker *objv) const
+RGWCoroutine* RGWSI_MDLog::read_oldest_log_period_cr(const DoutPrefixProvider *dpp,
+ Cursor *period, RGWObjVersionTracker *objv) const
{
- return new mdlog::ReadHistoryCR(svc, period, objv);
+ return new mdlog::ReadHistoryCR(dpp, svc, period, objv);
}
-RGWCoroutine* RGWSI_MDLog::trim_log_period_cr(Cursor period,
- RGWObjVersionTracker *objv) const
+RGWCoroutine* RGWSI_MDLog::trim_log_period_cr(const DoutPrefixProvider *dpp,
+ Cursor period, RGWObjVersionTracker *objv) const
{
- return new mdlog::TrimHistoryCR(svc, period, objv);
+ return new mdlog::TrimHistoryCR(dpp, svc, period, objv);
}
RGWMetadataLog* RGWSI_MDLog::get_log(const std::string& period)
return &insert.first->second;
}
-int RGWSI_MDLog::add_entry(const string& hash_key, const string& section, const string& key, bufferlist& bl)
+int RGWSI_MDLog::add_entry(const DoutPrefixProvider *dpp, const string& hash_key, const string& section, const string& key, bufferlist& bl)
{
ceph_assert(current_log); // must have called init()
- return current_log->add_entry(hash_key, section, key, bl);
+ return current_log->add_entry(dpp, hash_key, section, key, bl);
}
int RGWSI_MDLog::get_shard_id(const string& hash_key, int *shard_id)
return current_log->get_shard_id(hash_key, shard_id);
}
-int RGWSI_MDLog::pull_period(const std::string& period_id, RGWPeriod& period,
+int RGWSI_MDLog::pull_period(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period,
optional_yield y)
{
- return period_puller->pull(period_id, period, y);
+ return period_puller->pull(dpp, period_id, period, y);
}
// traverse all the way back to the beginning of the period history, and
// return a cursor to the first period in a fully attached history
- RGWPeriodHistory::Cursor find_oldest_period(optional_yield y);
+ RGWPeriodHistory::Cursor find_oldest_period(const DoutPrefixProvider *dpp, optional_yield y);
/// initialize the oldest log period if it doesn't exist, and attach it to
/// our current history
/// read the oldest log period asynchronously and write its result to the
/// given cursor pointer
- RGWCoroutine* read_oldest_log_period_cr(RGWPeriodHistory::Cursor *period,
+ RGWCoroutine* read_oldest_log_period_cr(const DoutPrefixProvider *dpp,
+ RGWPeriodHistory::Cursor *period,
RGWObjVersionTracker *objv) const;
/// try to advance the oldest log period when the given period is trimmed,
/// using a rados lock to provide atomicity
- RGWCoroutine* trim_log_period_cr(RGWPeriodHistory::Cursor period,
+ RGWCoroutine* trim_log_period_cr(const DoutPrefixProvider *dpp,
+ RGWPeriodHistory::Cursor period,
RGWObjVersionTracker *objv) const;
int read_history(RGWMetadataLogHistory *state, RGWObjVersionTracker *objv_tracker,optional_yield y, const DoutPrefixProvider *dpp) const;
- int write_history(const RGWMetadataLogHistory& state,
+ int write_history(const DoutPrefixProvider *dpp,
+ const RGWMetadataLogHistory& state,
RGWObjVersionTracker *objv_tracker,
optional_yield y, bool exclusive = false);
- int add_entry(const string& hash_key, const string& section, const string& key, bufferlist& bl);
+ int add_entry(const DoutPrefixProvider *dpp, const string& hash_key, const string& section, const string& key, bufferlist& bl);
int get_shard_id(const string& hash_key, int *shard_id);
return period_history.get();
}
- int pull_period(const std::string& period_id, RGWPeriod& period, optional_yield y);
+ int pull_period(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y);
/// find or create the metadata log for the given period
RGWMetadataLog* get_log(const std::string& period);
RGWSI_MetaBackend::GetParams::~GetParams() {} // ...
RGWSI_MetaBackend::RemoveParams::~RemoveParams() {} // ...
-int RGWSI_MetaBackend::pre_modify(RGWSI_MetaBackend::Context *ctx,
+int RGWSI_MetaBackend::pre_modify(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const string& key,
RGWMetadataLogData& log_data,
RGWObjVersionTracker *objv_tracker,
return 0;
}
-int RGWSI_MetaBackend::post_modify(RGWSI_MetaBackend::Context *ctx,
+int RGWSI_MetaBackend::post_modify(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const string& key,
RGWMetadataLogData& log_data,
RGWObjVersionTracker *objv_tracker, int ret,
}
RGWMetadataLogData log_data;
- ret = pre_modify(ctx, key, log_data, objv_tracker, op_type, y);
+ ret = pre_modify(dpp, ctx, key, log_data, objv_tracker, op_type, y);
if (ret < 0) {
return ret;
}
/* cascading ret into post_modify() */
- ret = post_modify(ctx, key, log_data, objv_tracker, ret, y);
+ ret = post_modify(dpp, ctx, key, log_data, objv_tracker, ret, y);
if (ret < 0)
return ret;
const DoutPrefixProvider *dpp)
{
std::function<int()> f = [&]() {
- return put_entry(ctx, key, params, objv_tracker, y);
+ return put_entry(dpp, ctx, key, params, objv_tracker, y);
};
return do_mutate(ctx, key, params.mtime, objv_tracker,
const DoutPrefixProvider *dpp)
{
std::function<int()> f = [&]() {
- return remove_entry(ctx, key, params, objv_tracker, y);
+ return remove_entry(dpp, ctx, key, params, objv_tracker, y);
};
return do_mutate(ctx, key, params.mtime, objv_tracker,
bool generic_prepare,
const DoutPrefixProvider *dpp);
- virtual int pre_modify(Context *ctx,
+ virtual int pre_modify(const DoutPrefixProvider *dpp,
+ Context *ctx,
const std::string& key,
RGWMetadataLogData& log_data,
RGWObjVersionTracker *objv_tracker,
RGWMDLogStatus op_type,
optional_yield y);
- virtual int post_modify(Context *ctx,
+ virtual int post_modify(const DoutPrefixProvider *dpp,
+ Context *ctx,
const std::string& key,
RGWMetadataLogData& log_data,
RGWObjVersionTracker *objv_tracker, int ret,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
- virtual int put_entry(RGWSI_MetaBackend::Context *ctx,
+ virtual int put_entry(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const std::string& key,
RGWSI_MetaBackend::PutParams& params,
RGWObjVersionTracker *objv_tracker,
optional_yield y) = 0;
- virtual int remove_entry(Context *ctx,
+ virtual int remove_entry(const DoutPrefixProvider *dpp,
+ Context *ctx,
const std::string& key,
RGWSI_MetaBackend::RemoveParams& params,
RGWObjVersionTracker *objv_tracker,
optional_yield y) = 0;
- virtual int list_init(RGWSI_MetaBackend::Context *ctx, const string& marker) = 0;
+ virtual int list_init(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const string& marker) = 0;
virtual int list_next(RGWSI_MetaBackend::Context *ctx,
int max, list<string> *keys,
bool *truncated) = 0;
return be->mutate(be_ctx, key, params, objv_tracker, y, f, dpp);
}
- int list_init(const string& marker) {
- return be->list_init(be_ctx, marker);
+ int list_init(const DoutPrefixProvider *dpp, const string& marker) {
+ return be->list_init(dpp, be_ctx, marker);
}
int list_next(int max, list<string> *keys,
bool *truncated) {
{
RGWSI_MBOTP_GetParams& params = static_cast<RGWSI_MBOTP_GetParams&>(_params);
- int r = cls_svc->mfa.list_mfa(key, params.pdevices, objv_tracker, params.pmtime, y);
+ int r = cls_svc->mfa.list_mfa(dpp, key, params.pdevices, objv_tracker, params.pmtime, y);
if (r < 0) {
return r;
}
return 0;
}
-int RGWSI_MetaBackend_OTP::put_entry(RGWSI_MetaBackend::Context *_ctx,
+int RGWSI_MetaBackend_OTP::put_entry(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *_ctx,
const string& key,
RGWSI_MetaBackend::PutParams& _params,
RGWObjVersionTracker *objv_tracker,
{
RGWSI_MBOTP_PutParams& params = static_cast<RGWSI_MBOTP_PutParams&>(_params);
- return cls_svc->mfa.set_mfa(key, params.devices, true, objv_tracker, params.mtime, y);
+ return cls_svc->mfa.set_mfa(dpp, key, params.devices, true, objv_tracker, params.mtime, y);
}
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp);
- int put_entry(RGWSI_MetaBackend::Context *ctx,
+ int put_entry(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const string& key,
RGWSI_MetaBackend::PutParams& _params,
RGWObjVersionTracker *objv_tracker,
return new Context_SObj(sysobj_svc);
}
-int RGWSI_MetaBackend_SObj::pre_modify(RGWSI_MetaBackend::Context *_ctx,
+int RGWSI_MetaBackend_SObj::pre_modify(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *_ctx,
const string& key,
RGWMetadataLogData& log_data,
RGWObjVersionTracker *objv_tracker,
optional_yield y)
{
auto ctx = static_cast<Context_SObj *>(_ctx);
- int ret = RGWSI_MetaBackend::pre_modify(ctx, key, log_data,
+ int ret = RGWSI_MetaBackend::pre_modify(dpp, ctx, key, log_data,
objv_tracker, op_type,
y);
if (ret < 0) {
bufferlist logbl;
encode(log_data, logbl);
- ret = mdlog_svc->add_entry(ctx->module->get_hash_key(key), ctx->module->get_section(), key, logbl);
+ ret = mdlog_svc->add_entry(dpp, ctx->module->get_hash_key(key), ctx->module->get_section(), key, logbl);
if (ret < 0)
return ret;
return 0;
}
-int RGWSI_MetaBackend_SObj::post_modify(RGWSI_MetaBackend::Context *_ctx,
+int RGWSI_MetaBackend_SObj::post_modify(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *_ctx,
const string& key,
RGWMetadataLogData& log_data,
RGWObjVersionTracker *objv_tracker, int ret,
bufferlist logbl;
encode(log_data, logbl);
- int r = mdlog_svc->add_entry(ctx->module->get_hash_key(key), ctx->module->get_section(), key, logbl);
+ int r = mdlog_svc->add_entry(dpp, ctx->module->get_hash_key(key), ctx->module->get_section(), key, logbl);
if (ret < 0)
return ret;
if (r < 0)
return r;
- return RGWSI_MetaBackend::post_modify(ctx, key, log_data, objv_tracker, ret, y);
+ return RGWSI_MetaBackend::post_modify(dpp, ctx, key, log_data, objv_tracker, ret, y);
}
int RGWSI_MetaBackend_SObj::get_shard_id(RGWSI_MetaBackend::Context *_ctx,
params.refresh_version);
}
-int RGWSI_MetaBackend_SObj::put_entry(RGWSI_MetaBackend::Context *_ctx,
+int RGWSI_MetaBackend_SObj::put_entry(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *_ctx,
const string& key,
PutParams& _params,
RGWObjVersionTracker *objv_tracker,
string oid;
ctx->module->get_pool_and_oid(key, &pool, &oid);
- return rgw_put_system_obj(*ctx->obj_ctx, pool, oid, params.bl, params.exclusive,
+ return rgw_put_system_obj(dpp, *ctx->obj_ctx, pool, oid, params.bl, params.exclusive,
objv_tracker, params.mtime, y, params.pattrs);
}
-int RGWSI_MetaBackend_SObj::remove_entry(RGWSI_MetaBackend::Context *_ctx,
+int RGWSI_MetaBackend_SObj::remove_entry(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *_ctx,
const string& key,
RemoveParams& params,
RGWObjVersionTracker *objv_tracker,
auto sysobj = ctx->obj_ctx->get_obj(k);
return sysobj.wop()
.set_objv_tracker(objv_tracker)
- .remove(y);
+ .remove(dpp, y);
}
-int RGWSI_MetaBackend_SObj::list_init(RGWSI_MetaBackend::Context *_ctx,
+int RGWSI_MetaBackend_SObj::list_init(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *_ctx,
const string& marker)
{
RGWSI_MetaBackend_SObj::Context_SObj *ctx = static_cast<RGWSI_MetaBackend_SObj::Context_SObj *>(_ctx);
ctx->list.op.emplace(ctx->list.pool->op());
string prefix = ctx->module->get_oid_prefix();
- ctx->list.op->init(marker, prefix);
+ ctx->list.op->init(dpp, marker, prefix);
return 0;
}
int call_with_get_params(ceph::real_time *pmtime, std::function<int(RGWSI_MetaBackend::GetParams&)> cb) override;
- int pre_modify(RGWSI_MetaBackend::Context *ctx,
+ int pre_modify(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const string& key,
RGWMetadataLogData& log_data,
RGWObjVersionTracker *objv_tracker,
RGWMDLogStatus op_type,
optional_yield y);
- int post_modify(RGWSI_MetaBackend::Context *ctx,
+ int post_modify(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const string& key,
RGWMetadataLogData& log_data,
RGWObjVersionTracker *objv_tracker, int ret,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp) override;
- int put_entry(RGWSI_MetaBackend::Context *ctx,
+ int put_entry(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const string& key,
RGWSI_MetaBackend::PutParams& params,
RGWObjVersionTracker *objv_tracker,
optional_yield y) override;
- int remove_entry(RGWSI_MetaBackend::Context *ctx,
+ int remove_entry(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const string& key,
RGWSI_MetaBackend::RemoveParams& params,
RGWObjVersionTracker *objv_tracker,
optional_yield y) override;
- int list_init(RGWSI_MetaBackend::Context *_ctx, const string& marker) override;
+ int list_init(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *_ctx, const string& marker) override;
int list_next(RGWSI_MetaBackend::Context *_ctx,
int max, list<string> *keys,
bool *truncated) override;
static string notify_oid_prefix = "notify";
-class RGWWatcher : public librados::WatchCtx2 {
+class RGWWatcher : public DoutPrefixProvider, public librados::WatchCtx2 {
CephContext *cct;
RGWSI_Notify *svc;
int index;
watcher->reinit();
}
};
+
+ CephContext *get_cct() const { return cct; }
+ unsigned get_subsys() const { return dout_subsys; }
+ std::ostream& gen_prefix(std::ostream& out) const { return out << "rgw watcher librados: "; }
+
public:
RGWWatcher(CephContext *_cct, RGWSI_Notify *s, int i, RGWSI_RADOS::Obj& o) : cct(_cct), svc(s), index(i), obj(o), watch_handle(0) {}
void handle_notify(uint64_t notify_id,
uint64_t cookie,
uint64_t notifier_id,
bufferlist& bl) override {
- ldout(cct, 10) << "RGWWatcher::handle_notify() "
+ ldpp_dout(this, 10) << "RGWWatcher::handle_notify() "
<< " notify_id " << notify_id
<< " cookie " << cookie
<< " notifier " << notifier_id
(svc->inject_notify_timeout_probability > 0 &&
(svc->inject_notify_timeout_probability >
ceph::util::generate_random_number(0.0, 1.0)))) {
- ldout(cct, 0)
+ ldpp_dout(this, 0)
<< "RGWWatcher::handle_notify() dropping notification! "
<< "If this isn't what you want, set "
<< "rgw_inject_notify_timeout_probability to zero!" << dendl;
return;
}
- svc->watch_cb(notify_id, cookie, notifier_id, bl);
+ svc->watch_cb(this, notify_id, cookie, notifier_id, bl);
bufferlist reply_bl; // empty reply payload
obj.notify_ack(notify_id, cookie, reply_bl);
return notify_objs[i];
}
-int RGWSI_Notify::init_watch(optional_yield y)
+int RGWSI_Notify::init_watch(const DoutPrefixProvider *dpp, optional_yield y)
{
num_watchers = cct->_conf->rgw_num_control_oids;
notify_objs[i] = rados_svc->handle().obj({control_pool, notify_oid});
auto& notify_obj = notify_objs[i];
- int r = notify_obj.open();
+ int r = notify_obj.open(dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: notify_obj.open() returned r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: notify_obj.open() returned r=" << r << dendl;
return r;
}
librados::ObjectWriteOperation op;
op.create(false);
- r = notify_obj.operate(&op, y);
+ r = notify_obj.operate(dpp, &op, y);
if (r < 0 && r != -EEXIST) {
- ldout(cct, 0) << "ERROR: notify_obj.operate() returned r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: notify_obj.operate() returned r=" << r << dendl;
return r;
}
r = watcher->register_watch_async();
if (r < 0) {
- ldout(cct, 0) << "WARNING: register_watch_aio() returned " << r << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: register_watch_aio() returned " << r << dendl;
error = r;
continue;
}
for (int i = 0; i < num_watchers; ++i) {
int r = watchers[i]->register_watch_finish();
if (r < 0) {
- ldout(cct, 0) << "WARNING: async watch returned " << r << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: async watch returned " << r << dendl;
error = r;
}
}
control_pool = zone_svc->get_zone_params().control_pool;
- int ret = init_watch(y);
+ int ret = init_watch(dpp, y);
if (ret < 0) {
lderr(cct) << "ERROR: failed to initialize watch: " << cpp_strerror(-ret) << dendl;
return ret;
}
}
-int RGWSI_Notify::watch_cb(uint64_t notify_id,
+int RGWSI_Notify::watch_cb(const DoutPrefixProvider *dpp,
+ uint64_t notify_id,
uint64_t cookie,
uint64_t notifier_id,
bufferlist& bl)
{
std::shared_lock l{watchers_lock};
if (cb) {
- return cb->watch_cb(notify_id, cookie, notifier_id, bl);
+ return cb->watch_cb(dpp, notify_id, cookie, notifier_id, bl);
}
return 0;
}
}
}
-int RGWSI_Notify::distribute(const string& key, bufferlist& bl,
+int RGWSI_Notify::distribute(const DoutPrefixProvider *dpp, const string& key, bufferlist& bl,
optional_yield y)
{
/* The RGW uses the control pool to store the watch notify objects.
if (num_watchers > 0) {
RGWSI_RADOS::Obj notify_obj = pick_control_obj(key);
- ldout(cct, 10) << "distributing notification oid=" << notify_obj.get_ref().obj
+ ldpp_dout(dpp, 10) << "distributing notification oid=" << notify_obj.get_ref().obj
<< " bl.length()=" << bl.length() << dendl;
- return robust_notify(notify_obj, bl, y);
+ return robust_notify(dpp, notify_obj, bl, y);
}
return 0;
}
-int RGWSI_Notify::robust_notify(RGWSI_RADOS::Obj& notify_obj, bufferlist& bl,
+int RGWSI_Notify::robust_notify(const DoutPrefixProvider *dpp,
+ RGWSI_RADOS::Obj& notify_obj, bufferlist& bl,
optional_yield y)
{
// The reply of every machine that acks goes in here.
bufferlist rbl;
// First, try to send, without being fancy about it.
- auto r = notify_obj.notify(bl, 0, &rbl, y);
+ auto r = notify_obj.notify(dpp, bl, 0, &rbl, y);
// If that doesn't work, get serious.
if (r < 0) {
- ldout(cct, 1) << "robust_notify: If at first you don't succeed: "
+ ldpp_dout(dpp, 1) << "robust_notify: If at first you don't succeed: "
<< cpp_strerror(-r) << dendl;
std::pair<uint64_t, uint64_t> id;
decode(id, p);
acks.insert(id);
- ldout(cct, 20) << "robust_notify: acked by " << id << dendl;
+ ldpp_dout(dpp, 20) << "robust_notify: acked by " << id << dendl;
uint32_t blen;
decode(blen, p);
p += blen;
}
} catch (const buffer::error& e) {
- ldout(cct, 0) << "robust_notify: notify response parse failed: "
+ ldpp_dout(dpp, 0) << "robust_notify: notify response parse failed: "
<< e.what() << dendl;
acks.clear(); // Throw away junk on failed parse.
}
rbl.clear();
// Reset the timeouts, we're only concerned with new ones.
timeouts.clear();
- r = notify_obj.notify(bl, 0, &rbl, y);
+ r = notify_obj.notify(dpp, bl, 0, &rbl, y);
if (r < 0) {
- ldout(cct, 1) << "robust_notify: retry " << tries << " failed: "
+ ldpp_dout(dpp, 1) << "robust_notify: retry " << tries << " failed: "
<< cpp_strerror(-r) << dendl;
p = rbl.begin();
try {
decode(id, p);
auto ir = acks.insert(id);
if (ir.second) {
- ldout(cct, 20) << "robust_notify: acked by " << id << dendl;
+ ldpp_dout(dpp, 20) << "robust_notify: acked by " << id << dendl;
}
uint32_t blen;
decode(blen, p);
decode(id, p);
// Only track timeouts from hosts that haven't acked previously.
if (acks.find(id) != acks.cend()) {
- ldout(cct, 20) << "robust_notify: " << id << " timed out."
+ ldpp_dout(dpp, 20) << "robust_notify: " << id << " timed out."
<< dendl;
timeouts.insert(id);
}
}
} catch (const buffer::error& e) {
- ldout(cct, 0) << "robust_notify: notify response parse failed: "
+ ldpp_dout(dpp, 0) << "robust_notify: notify response parse failed: "
<< e.what() << dendl;
continue;
}
bool finalized{false};
- int init_watch(optional_yield y);
+ int init_watch(const DoutPrefixProvider *dpp, optional_yield y);
void finalize_watch();
void init(RGWSI_Zone *_zone_svc,
void add_watcher(int i);
void remove_watcher(int i);
- int watch_cb(uint64_t notify_id,
+ int watch_cb(const DoutPrefixProvider *dpp,
+ uint64_t notify_id,
uint64_t cookie,
uint64_t notifier_id,
bufferlist& bl);
void _set_enabled(bool status);
void set_enabled(bool status);
- int robust_notify(RGWSI_RADOS::Obj& notify_obj, bufferlist& bl,
+ int robust_notify(const DoutPrefixProvider *dpp,
+ RGWSI_RADOS::Obj& notify_obj, bufferlist& bl,
optional_yield y);
void schedule_context(Context *c);
class CB {
public:
virtual ~CB() {}
- virtual int watch_cb(uint64_t notify_id,
+ virtual int watch_cb(const DoutPrefixProvider *dpp,
+ uint64_t notify_id,
uint64_t cookie,
uint64_t notifier_id,
bufferlist& bl) = 0;
virtual void set_enabled(bool status) = 0;
};
- int distribute(const string& key, bufferlist& bl, optional_yield y);
+ int distribute(const DoutPrefixProvider *dpp, const string& key, bufferlist& bl, optional_yield y);
void register_watch_cb(CB *cb);
};
dpp);
}
-int RGWSI_OTP::store_all(RGWSI_OTP_BE_Ctx& ctx,
+int RGWSI_OTP::store_all(const DoutPrefixProvider *dpp,
+ RGWSI_OTP_BE_Ctx& ctx,
const string& key,
const otp_devices_list_t& devices,
real_time mtime,
params.mtime = mtime;
params.devices = devices;
- int ret = svc.meta_be->put_entry(ctx.get(), key, params, objv_tracker, y);
+ int ret = svc.meta_be->put_entry(dpp, ctx.get(), key, params, objv_tracker, y);
if (ret < 0) {
return ret;
}
return 0;
}
-int RGWSI_OTP::store_all(RGWSI_OTP_BE_Ctx& ctx,
+int RGWSI_OTP::store_all(const DoutPrefixProvider *dpp,
+ RGWSI_OTP_BE_Ctx& ctx,
const rgw_user& uid,
const otp_devices_list_t& devices,
real_time mtime,
RGWObjVersionTracker *objv_tracker,
optional_yield y)
{
- return store_all(ctx,
+ return store_all(dpp, ctx,
uid.to_str(),
devices,
mtime,
y);
}
-int RGWSI_OTP::remove_all(RGWSI_OTP_BE_Ctx& ctx,
+int RGWSI_OTP::remove_all(const DoutPrefixProvider *dpp,
+ RGWSI_OTP_BE_Ctx& ctx,
const string& key,
RGWObjVersionTracker *objv_tracker,
optional_yield y)
{
RGWSI_MBOTP_RemoveParams params;
- int ret = svc.meta_be->remove_entry(ctx.get(), key, params, objv_tracker, y);
+ int ret = svc.meta_be->remove_entry(dpp, ctx.get(), key, params, objv_tracker, y);
if (ret < 0) {
return ret;
}
return 0;
}
-int RGWSI_OTP::remove_all(RGWSI_OTP_BE_Ctx& ctx,
+int RGWSI_OTP::remove_all(const DoutPrefixProvider *dpp,
+ RGWSI_OTP_BE_Ctx& ctx,
const rgw_user& uid,
RGWObjVersionTracker *objv_tracker,
optional_yield y)
{
- return remove_all(ctx,
+ return remove_all(dpp, ctx,
uid.to_str(),
objv_tracker,
y);
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp);
- int store_all(RGWSI_OTP_BE_Ctx& ctx,
+ int store_all(const DoutPrefixProvider *dpp,
+ RGWSI_OTP_BE_Ctx& ctx,
const string& key,
const otp_devices_list_t& devices,
real_time mtime,
RGWObjVersionTracker *objv_tracker,
optional_yield y);
- int store_all(RGWSI_OTP_BE_Ctx& ctx,
+ int store_all(const DoutPrefixProvider *dpp,
+ RGWSI_OTP_BE_Ctx& ctx,
const rgw_user& uid,
const otp_devices_list_t& devices,
real_time mtime,
RGWObjVersionTracker *objv_tracker,
optional_yield y);
- int remove_all(RGWSI_OTP_BE_Ctx& ctx,
+ int remove_all(const DoutPrefixProvider *dpp,
+ RGWSI_OTP_BE_Ctx& ctx,
const string& key,
RGWObjVersionTracker *objv_tracker,
optional_yield y);
- int remove_all(RGWSI_OTP_BE_Ctx& ctx,
+ int remove_all(const DoutPrefixProvider *dpp,
+ RGWSI_OTP_BE_Ctx& ctx,
const rgw_user& uid,
RGWObjVersionTracker *objv_tracker,
optional_yield y);
return get_rados_handle()->get_instance_id();
}
-int RGWSI_RADOS::open_pool_ctx(const rgw_pool& pool, librados::IoCtx& io_ctx,
+int RGWSI_RADOS::open_pool_ctx(const DoutPrefixProvider *dpp, const rgw_pool& pool, librados::IoCtx& io_ctx,
const OpenParams& params)
{
- return rgw_init_ioctx(get_rados_handle(), pool, io_ctx,
+ return rgw_init_ioctx(dpp, get_rados_handle(), pool, io_ctx,
params.create,
params.mostly_omap);
}
ref.obj = obj;
}
-int RGWSI_RADOS::Obj::open()
+int RGWSI_RADOS::Obj::open(const DoutPrefixProvider *dpp)
{
- int r = ref.pool.open();
+ int r = ref.pool.open(dpp);
if (r < 0) {
return r;
}
return 0;
}
-int RGWSI_RADOS::Obj::operate(librados::ObjectWriteOperation *op,
+int RGWSI_RADOS::Obj::operate(const DoutPrefixProvider *dpp, librados::ObjectWriteOperation *op,
optional_yield y, int flags)
{
- return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, op, y, flags);
+ return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, op, y, flags);
}
-int RGWSI_RADOS::Obj::operate(librados::ObjectReadOperation *op,
+int RGWSI_RADOS::Obj::operate(const DoutPrefixProvider *dpp, librados::ObjectReadOperation *op,
bufferlist *pbl, optional_yield y, int flags)
{
- return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, op, pbl, y, flags);
+ return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, op, pbl, y, flags);
}
int RGWSI_RADOS::Obj::aio_operate(librados::AioCompletion *c, librados::ObjectWriteOperation *op)
return ref.pool.ioctx().unwatch2(handle);
}
-int RGWSI_RADOS::Obj::notify(bufferlist& bl, uint64_t timeout_ms,
+int RGWSI_RADOS::Obj::notify(const DoutPrefixProvider *dpp, bufferlist& bl, uint64_t timeout_ms,
bufferlist *pbl, optional_yield y)
{
- return rgw_rados_notify(ref.pool.ioctx(), ref.obj.oid, bl, timeout_ms, pbl, y);
+ return rgw_rados_notify(dpp, ref.pool.ioctx(), ref.obj.oid, bl, timeout_ms, pbl, y);
}
void RGWSI_RADOS::Obj::notify_ack(uint64_t notify_id,
return 0;
}
-int RGWSI_RADOS::Pool::open(const OpenParams& params)
+int RGWSI_RADOS::Pool::open(const DoutPrefixProvider *dpp, const OpenParams& params)
{
- return rados_svc->open_pool_ctx(pool, state.ioctx, params);
+ return rados_svc->open_pool_ctx(dpp, pool, state.ioctx, params);
}
-int RGWSI_RADOS::Pool::List::init(const string& marker, RGWAccessListFilter *filter)
+int RGWSI_RADOS::Pool::List::init(const DoutPrefixProvider *dpp, const string& marker, RGWAccessListFilter *filter)
{
if (ctx.initialized) {
return -EINVAL;
return -EINVAL;
}
- int r = pool->rados_svc->open_pool_ctx(pool->pool, ctx.ioctx);
+ int r = pool->rados_svc->open_pool_ctx(dpp, pool->pool, ctx.ioctx);
if (r < 0) {
return r;
}
librados::ObjectCursor oc;
if (!oc.from_str(marker)) {
- ldout(pool->rados_svc->cct, 10) << "failed to parse cursor: " << marker << dendl;
+ ldpp_dout(dpp, 10) << "failed to parse cursor: " << marker << dendl;
return -EINVAL;
}
};
private:
- int open_pool_ctx(const rgw_pool& pool, librados::IoCtx& io_ctx,
+ int open_pool_ctx(const DoutPrefixProvider *dpp, const rgw_pool& pool, librados::IoCtx& io_ctx,
const OpenParams& params = {});
int pool_iterate(librados::IoCtx& ioctx,
librados::NObjectIterator& iter,
int create();
int create(const std::vector<rgw_pool>& pools, std::vector<int> *retcodes);
int lookup();
- int open(const OpenParams& params = {});
+ int open(const DoutPrefixProvider *dpp, const OpenParams& params = {});
const rgw_pool& get_pool() {
return pool;
List() {}
List(Pool *_pool) : pool(_pool) {}
- int init(const string& marker, RGWAccessListFilter *filter = nullptr);
+ int init(const DoutPrefixProvider *dpp, const string& marker, RGWAccessListFilter *filter = nullptr);
int get_next(int max,
std::vector<string> *oids,
bool *is_truncated);
public:
Obj() {}
- int open();
+ int open(const DoutPrefixProvider *dpp);
- int operate(librados::ObjectWriteOperation *op, optional_yield y,
+ int operate(const DoutPrefixProvider *dpp, librados::ObjectWriteOperation *op, optional_yield y,
int flags = 0);
- int operate(librados::ObjectReadOperation *op, bufferlist *pbl,
+ int operate(const DoutPrefixProvider *dpp, librados::ObjectReadOperation *op, bufferlist *pbl,
optional_yield y, int flags = 0);
int aio_operate(librados::AioCompletion *c, librados::ObjectWriteOperation *op);
int aio_operate(librados::AioCompletion *c, librados::ObjectReadOperation *op,
int watch(uint64_t *handle, librados::WatchCtx2 *ctx);
int aio_watch(librados::AioCompletion *c, uint64_t *handle, librados::WatchCtx2 *ctx);
int unwatch(uint64_t handle);
- int notify(bufferlist& bl, uint64_t timeout_ms,
+ int notify(const DoutPrefixProvider *dpp, bufferlist& bl, uint64_t timeout_ms,
bufferlist *pbl, optional_yield y);
void notify_ack(uint64_t notify_id,
uint64_t cookie,
objv_tracker, y, dpp);
}
-int RGWSI_SysObj::Obj::ROp::read(int64_t ofs, int64_t end, bufferlist *bl,
+int RGWSI_SysObj::Obj::ROp::read(const DoutPrefixProvider *dpp,
+ int64_t ofs, int64_t end, bufferlist *bl,
optional_yield y)
{
RGWSI_SysObj_Core *svc = source.core_svc;
rgw_raw_obj& obj = source.get_obj();
- return svc->read(source.get_ctx(), *state,
+ return svc->read(dpp, source.get_ctx(), *state,
objv_tracker,
obj, bl, ofs, end,
attrs,
refresh_version, y);
}
-int RGWSI_SysObj::Obj::ROp::get_attr(const char *name, bufferlist *dest,
+int RGWSI_SysObj::Obj::ROp::get_attr(const DoutPrefixProvider *dpp,
+ const char *name, bufferlist *dest,
optional_yield y)
{
RGWSI_SysObj_Core *svc = source.core_svc;
rgw_raw_obj& obj = source.get_obj();
- return svc->get_attr(obj, name, dest, y);
+ return svc->get_attr(dpp, obj, name, dest, y);
}
-int RGWSI_SysObj::Obj::WOp::remove(optional_yield y)
+int RGWSI_SysObj::Obj::WOp::remove(const DoutPrefixProvider *dpp, optional_yield y)
{
RGWSI_SysObj_Core *svc = source.core_svc;
rgw_raw_obj& obj = source.get_obj();
- return svc->remove(source.get_ctx(),
+ return svc->remove(dpp, source.get_ctx(),
objv_tracker,
obj, y);
}
-int RGWSI_SysObj::Obj::WOp::write(bufferlist& bl, optional_yield y)
+int RGWSI_SysObj::Obj::WOp::write(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y)
{
RGWSI_SysObj_Core *svc = source.core_svc;
rgw_raw_obj& obj = source.get_obj();
- return svc->write(obj, pmtime, attrs, exclusive,
+ return svc->write(dpp, obj, pmtime, attrs, exclusive,
bl, objv_tracker, mtime, y);
}
-int RGWSI_SysObj::Obj::WOp::write_data(bufferlist& bl, optional_yield y)
+int RGWSI_SysObj::Obj::WOp::write_data(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y)
{
RGWSI_SysObj_Core *svc = source.core_svc;
rgw_raw_obj& obj = source.get_obj();
- return svc->write_data(obj, bl, exclusive, objv_tracker, y);
+ return svc->write_data(dpp, obj, bl, exclusive, objv_tracker, y);
}
-int RGWSI_SysObj::Obj::WOp::write_attrs(optional_yield y)
+int RGWSI_SysObj::Obj::WOp::write_attrs(const DoutPrefixProvider *dpp, optional_yield y)
{
RGWSI_SysObj_Core *svc = source.core_svc;
rgw_raw_obj& obj = source.get_obj();
- return svc->set_attrs(obj, attrs, nullptr, objv_tracker, y);
+ return svc->set_attrs(dpp, obj, attrs, nullptr, objv_tracker, y);
}
-int RGWSI_SysObj::Obj::WOp::write_attr(const char *name, bufferlist& bl,
+int RGWSI_SysObj::Obj::WOp::write_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist& bl,
optional_yield y)
{
RGWSI_SysObj_Core *svc = source.core_svc;
map<string, bufferlist> m;
m[name] = bl;
- return svc->set_attrs(obj, m, nullptr, objv_tracker, y);
+ return svc->set_attrs(dpp, obj, m, nullptr, objv_tracker, y);
}
-int RGWSI_SysObj::Pool::list_prefixed_objs(const string& prefix, std::function<void(const string&)> cb)
+int RGWSI_SysObj::Pool::list_prefixed_objs(const DoutPrefixProvider *dpp, const string& prefix, std::function<void(const string&)> cb)
{
- return core_svc->pool_list_prefixed_objs(pool, prefix, cb);
+ return core_svc->pool_list_prefixed_objs(dpp, pool, prefix, cb);
}
-int RGWSI_SysObj::Pool::Op::init(const string& marker, const string& prefix)
+int RGWSI_SysObj::Pool::Op::init(const DoutPrefixProvider *dpp, const string& marker, const string& prefix)
{
- return source.core_svc->pool_list_objects_init(source.pool, marker, prefix, &ctx);
+ return source.core_svc->pool_list_objects_init(dpp, source.pool, marker, prefix, &ctx);
}
int RGWSI_SysObj::Pool::Op::get_next(int max, vector<string> *oids, bool *is_truncated)
return source.core_svc->pool_list_objects_get_marker(ctx, marker);
}
-int RGWSI_SysObj::Obj::OmapOp::get_all(std::map<string, bufferlist> *m,
+int RGWSI_SysObj::Obj::OmapOp::get_all(const DoutPrefixProvider *dpp, std::map<string, bufferlist> *m,
optional_yield y)
{
RGWSI_SysObj_Core *svc = source.core_svc;
rgw_raw_obj& obj = source.obj;
- return svc->omap_get_all(obj, m, y);
+ return svc->omap_get_all(dpp, obj, m, y);
}
-int RGWSI_SysObj::Obj::OmapOp::get_vals(const string& marker, uint64_t count,
+int RGWSI_SysObj::Obj::OmapOp::get_vals(const DoutPrefixProvider *dpp,
+ const string& marker, uint64_t count,
std::map<string, bufferlist> *m,
bool *pmore, optional_yield y)
{
RGWSI_SysObj_Core *svc = source.core_svc;
rgw_raw_obj& obj = source.obj;
- return svc->omap_get_vals(obj, marker, count, m, pmore, y);
+ return svc->omap_get_vals(dpp, obj, marker, count, m, pmore, y);
}
-int RGWSI_SysObj::Obj::OmapOp::set(const std::string& key, bufferlist& bl,
+int RGWSI_SysObj::Obj::OmapOp::set(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& bl,
optional_yield y)
{
RGWSI_SysObj_Core *svc = source.core_svc;
rgw_raw_obj& obj = source.obj;
- return svc->omap_set(obj, key, bl, must_exist, y);
+ return svc->omap_set(dpp, obj, key, bl, must_exist, y);
}
-int RGWSI_SysObj::Obj::OmapOp::set(const map<std::string, bufferlist>& m,
+int RGWSI_SysObj::Obj::OmapOp::set(const DoutPrefixProvider *dpp, const map<std::string, bufferlist>& m,
optional_yield y)
{
RGWSI_SysObj_Core *svc = source.core_svc;
rgw_raw_obj& obj = source.obj;
- return svc->omap_set(obj, m, must_exist, y);
+ return svc->omap_set(dpp, obj, m, must_exist, y);
}
-int RGWSI_SysObj::Obj::OmapOp::del(const std::string& key, optional_yield y)
+int RGWSI_SysObj::Obj::OmapOp::del(const DoutPrefixProvider *dpp, const std::string& key, optional_yield y)
{
RGWSI_SysObj_Core *svc = source.core_svc;
rgw_raw_obj& obj = source.obj;
- return svc->omap_del(obj, key, y);
+ return svc->omap_del(dpp, obj, key, y);
}
-int RGWSI_SysObj::Obj::WNOp::notify(bufferlist& bl, uint64_t timeout_ms,
+int RGWSI_SysObj::Obj::WNOp::notify(const DoutPrefixProvider *dpp, bufferlist& bl, uint64_t timeout_ms,
bufferlist *pbl, optional_yield y)
{
RGWSI_SysObj_Core *svc = source.core_svc;
rgw_raw_obj& obj = source.obj;
- return svc->notify(obj, bl, timeout_ms, pbl, y);
+ return svc->notify(dpp, obj, bl, timeout_ms, pbl, y);
}
RGWSI_Zone *RGWSI_SysObj::get_zone_svc()
ROp(Obj& _source);
int stat(optional_yield y, const DoutPrefixProvider *dpp);
- int read(int64_t ofs, int64_t end, bufferlist *pbl, optional_yield y);
- int read(bufferlist *pbl, optional_yield y) {
- return read(0, -1, pbl, y);
+ int read(const DoutPrefixProvider *dpp, int64_t ofs, int64_t end, bufferlist *pbl, optional_yield y);
+ int read(const DoutPrefixProvider *dpp, bufferlist *pbl, optional_yield y) {
+ return read(dpp, 0, -1, pbl, y);
}
- int get_attr(const char *name, bufferlist *dest, optional_yield y);
+ int get_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist *dest, optional_yield y);
};
struct WOp {
WOp(Obj& _source) : source(_source) {}
- int remove(optional_yield y);
- int write(bufferlist& bl, optional_yield y);
+ int remove(const DoutPrefixProvider *dpp, optional_yield y);
+ int write(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y);
- int write_data(bufferlist& bl, optional_yield y); /* write data only */
- int write_attrs(optional_yield y); /* write attrs only */
- int write_attr(const char *name, bufferlist& bl,
+ int write_data(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y); /* write data only */
+ int write_attrs(const DoutPrefixProvider *dpp, optional_yield y); /* write attrs only */
+ int write_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist& bl,
optional_yield y); /* write attrs only */
};
OmapOp(Obj& _source) : source(_source) {}
- int get_all(std::map<string, bufferlist> *m, optional_yield y);
- int get_vals(const string& marker, uint64_t count,
+ int get_all(const DoutPrefixProvider *dpp, std::map<string, bufferlist> *m, optional_yield y);
+ int get_vals(const DoutPrefixProvider *dpp, const string& marker, uint64_t count,
std::map<string, bufferlist> *m,
bool *pmore, optional_yield y);
- int set(const std::string& key, bufferlist& bl, optional_yield y);
- int set(const map<std::string, bufferlist>& m, optional_yield y);
- int del(const std::string& key, optional_yield y);
+ int set(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& bl, optional_yield y);
+ int set(const DoutPrefixProvider *dpp, const map<std::string, bufferlist>& m, optional_yield y);
+ int del(const DoutPrefixProvider *dpp, const std::string& key, optional_yield y);
};
struct WNOp {
WNOp(Obj& _source) : source(_source) {}
- int notify(bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl,
+ int notify(const DoutPrefixProvider *dpp, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl,
optional_yield y);
};
ROp rop() {
Op(Pool& _source) : source(_source) {}
- int init(const std::string& marker, const std::string& prefix);
+ int init(const DoutPrefixProvider *dpp, const std::string& marker, const std::string& prefix);
int get_next(int max, std::vector<string> *oids, bool *is_truncated);
int get_marker(string *marker);
};
- int list_prefixed_objs(const std::string& prefix, std::function<void(const string&)> cb);
+ int list_prefixed_objs(const DoutPrefixProvider *dpp, const std::string& prefix, std::function<void(const string&)> cb);
template <typename Container>
- int list_prefixed_objs(const string& prefix,
+ int list_prefixed_objs(const DoutPrefixProvider *dpp, const string& prefix,
Container *result) {
- return list_prefixed_objs(prefix, [&](const string& val) {
+ return list_prefixed_objs(dpp, prefix, [&](const string& val) {
result->push_back(val);
});
}
RGWSI_SysObj_Cache *svc;
public:
RGWSI_SysObj_Cache_CB(RGWSI_SysObj_Cache *_svc) : svc(_svc) {}
- int watch_cb(uint64_t notify_id,
+ int watch_cb(const DoutPrefixProvider *dpp,
+ uint64_t notify_id,
uint64_t cookie,
uint64_t notifier_id,
bufferlist& bl) {
- return svc->watch_cb(notify_id, cookie, notifier_id, bl);
+ return svc->watch_cb(dpp, notify_id, cookie, notifier_id, bl);
}
void set_enabled(bool status) {
}
-int RGWSI_SysObj_Cache::remove(RGWSysObjectCtxBase& obj_ctx,
+int RGWSI_SysObj_Cache::remove(const DoutPrefixProvider *dpp,
+ RGWSysObjectCtxBase& obj_ctx,
RGWObjVersionTracker *objv_tracker,
const rgw_raw_obj& obj,
optional_yield y)
normalize_pool_and_obj(obj.pool, obj.oid, pool, oid);
string name = normal_name(pool, oid);
- cache.remove(name);
+ cache.remove(dpp, name);
ObjectCacheInfo info;
- int r = distribute_cache(name, obj, info, REMOVE_OBJ, y);
+ int r = distribute_cache(dpp, name, obj, info, REMOVE_OBJ, y);
if (r < 0) {
- ldout(cct, 0) << "ERROR: " << __func__ << "(): failed to distribute cache: r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to distribute cache: r=" << r << dendl;
}
- return RGWSI_SysObj_Core::remove(obj_ctx, objv_tracker, obj, y);
+ return RGWSI_SysObj_Core::remove(dpp, obj_ctx, objv_tracker, obj, y);
}
-int RGWSI_SysObj_Cache::read(RGWSysObjectCtxBase& obj_ctx,
+int RGWSI_SysObj_Cache::read(const DoutPrefixProvider *dpp,
+ RGWSysObjectCtxBase& obj_ctx,
RGWSI_SysObj_Obj_GetObjState& read_state,
RGWObjVersionTracker *objv_tracker,
const rgw_raw_obj& obj,
rgw_pool pool;
string oid;
if (ofs != 0) {
- return RGWSI_SysObj_Core::read(obj_ctx, read_state, objv_tracker,
+ return RGWSI_SysObj_Core::read(dpp, obj_ctx, read_state, objv_tracker,
obj, obl, ofs, end, attrs, raw_attrs,
cache_info, refresh_version, y);
}
if (attrs)
flags |= CACHE_FLAG_XATTRS;
- int r = cache.get(name, info, flags, cache_info);
+ int r = cache.get(dpp, name, info, flags, cache_info);
if (r == 0 &&
(!refresh_version || !info.version.compare(&(*refresh_version)))) {
if (info.status < 0)
return -ENOENT;
map<string, bufferlist> unfiltered_attrset;
- r = RGWSI_SysObj_Core::read(obj_ctx, read_state, objv_tracker,
+ r = RGWSI_SysObj_Core::read(dpp, obj_ctx, read_state, objv_tracker,
obj, obl, ofs, end,
(attrs ? &unfiltered_attrset : nullptr),
true, /* cache unfiltered attrs */
if (r < 0) {
if (r == -ENOENT) { // only update ENOENT, we'd rather retry other errors
info.status = r;
- cache.put(name, info, cache_info);
+ cache.put(dpp, name, info, cache_info);
}
return r;
}
rgw_filter_attrset(info.xattrs, RGW_ATTR_PREFIX, attrs);
}
}
- cache.put(name, info, cache_info);
+ cache.put(dpp, name, info, cache_info);
return r;
}
-int RGWSI_SysObj_Cache::get_attr(const rgw_raw_obj& obj,
+int RGWSI_SysObj_Cache::get_attr(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj,
const char *attr_name,
bufferlist *dest,
optional_yield y)
uint32_t flags = CACHE_FLAG_XATTRS;
- int r = cache.get(name, info, flags, nullptr);
+ int r = cache.get(dpp, name, info, flags, nullptr);
if (r == 0) {
if (info.status < 0)
return info.status;
return -ENOENT;
}
/* don't try to cache this one */
- return RGWSI_SysObj_Core::get_attr(obj, attr_name, dest, y);
+ return RGWSI_SysObj_Core::get_attr(dpp, obj, attr_name, dest, y);
}
-int RGWSI_SysObj_Cache::set_attrs(const rgw_raw_obj& obj,
+int RGWSI_SysObj_Cache::set_attrs(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj,
map<string, bufferlist>& attrs,
map<string, bufferlist> *rmattrs,
RGWObjVersionTracker *objv_tracker,
}
info.status = 0;
info.flags = CACHE_FLAG_MODIFY_XATTRS;
- int ret = RGWSI_SysObj_Core::set_attrs(obj, attrs, rmattrs, objv_tracker, y);
+ int ret = RGWSI_SysObj_Core::set_attrs(dpp, obj, attrs, rmattrs, objv_tracker, y);
string name = normal_name(pool, oid);
if (ret >= 0) {
if (objv_tracker && objv_tracker->read_version.ver) {
info.version = objv_tracker->read_version;
info.flags |= CACHE_FLAG_OBJV;
}
- cache.put(name, info, NULL);
- int r = distribute_cache(name, obj, info, UPDATE_OBJ, y);
+ cache.put(dpp, name, info, NULL);
+ int r = distribute_cache(dpp, name, obj, info, UPDATE_OBJ, y);
if (r < 0)
- ldout(cct, 0) << "ERROR: failed to distribute cache for " << obj << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to distribute cache for " << obj << dendl;
} else {
- cache.remove(name);
+ cache.remove(dpp, name);
}
return ret;
}
-int RGWSI_SysObj_Cache::write(const rgw_raw_obj& obj,
+int RGWSI_SysObj_Cache::write(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj,
real_time *pmtime,
map<std::string, bufferlist>& attrs,
bool exclusive,
info.data = data;
info.flags = CACHE_FLAG_XATTRS | CACHE_FLAG_DATA | CACHE_FLAG_META;
ceph::real_time result_mtime;
- int ret = RGWSI_SysObj_Core::write(obj, &result_mtime, attrs,
+ int ret = RGWSI_SysObj_Core::write(dpp, obj, &result_mtime, attrs,
exclusive, data,
objv_tracker, set_mtime, y);
if (pmtime) {
info.meta.size = data.length();
string name = normal_name(pool, oid);
if (ret >= 0) {
- cache.put(name, info, NULL);
- int r = distribute_cache(name, obj, info, UPDATE_OBJ, y);
+ cache.put(dpp, name, info, NULL);
+ int r = distribute_cache(dpp, name, obj, info, UPDATE_OBJ, y);
if (r < 0)
- ldout(cct, 0) << "ERROR: failed to distribute cache for " << obj << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to distribute cache for " << obj << dendl;
} else {
- cache.remove(name);
+ cache.remove(dpp, name);
}
return ret;
}
-int RGWSI_SysObj_Cache::write_data(const rgw_raw_obj& obj,
+int RGWSI_SysObj_Cache::write_data(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj,
const bufferlist& data,
bool exclusive,
RGWObjVersionTracker *objv_tracker,
info.status = 0;
info.flags = CACHE_FLAG_DATA;
- int ret = RGWSI_SysObj_Core::write_data(obj, data, exclusive, objv_tracker, y);
+ int ret = RGWSI_SysObj_Core::write_data(dpp, obj, data, exclusive, objv_tracker, y);
string name = normal_name(pool, oid);
if (ret >= 0) {
if (objv_tracker && objv_tracker->read_version.ver) {
info.version = objv_tracker->read_version;
info.flags |= CACHE_FLAG_OBJV;
}
- cache.put(name, info, NULL);
- int r = distribute_cache(name, obj, info, UPDATE_OBJ, y);
+ cache.put(dpp, name, info, NULL);
+ int r = distribute_cache(dpp, name, obj, info, UPDATE_OBJ, y);
if (r < 0)
- ldout(cct, 0) << "ERROR: failed to distribute cache for " << obj << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to distribute cache for " << obj << dendl;
} else {
- cache.remove(name);
+ cache.remove(dpp, name);
}
return ret;
}
-int RGWSI_SysObj_Cache::raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *pepoch,
+int RGWSI_SysObj_Cache::raw_stat(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *pepoch,
map<string, bufferlist> *attrs, bufferlist *first_chunk,
RGWObjVersionTracker *objv_tracker,
optional_yield y)
uint32_t flags = CACHE_FLAG_META | CACHE_FLAG_XATTRS;
if (objv_tracker)
flags |= CACHE_FLAG_OBJV;
- int r = cache.get(name, info, flags, NULL);
+ int r = cache.get(dpp, name, info, flags, NULL);
if (r == 0) {
if (info.status < 0)
return info.status;
if (r == -ENODATA) {
return -ENOENT;
}
- r = RGWSI_SysObj_Core::raw_stat(obj, &size, &mtime, &epoch, &info.xattrs,
+ r = RGWSI_SysObj_Core::raw_stat(dpp, obj, &size, &mtime, &epoch, &info.xattrs,
first_chunk, objv_tracker, y);
if (r < 0) {
if (r == -ENOENT) {
info.status = r;
- cache.put(name, info, NULL);
+ cache.put(dpp, name, info, NULL);
}
return r;
}
info.flags |= CACHE_FLAG_OBJV;
info.version = objv_tracker->read_version;
}
- cache.put(name, info, NULL);
+ cache.put(dpp, name, info, NULL);
done:
if (psize)
*psize = size;
return 0;
}
-int RGWSI_SysObj_Cache::distribute_cache(const string& normal_name,
+int RGWSI_SysObj_Cache::distribute_cache(const DoutPrefixProvider *dpp,
+ const string& normal_name,
const rgw_raw_obj& obj,
ObjectCacheInfo& obj_info, int op,
optional_yield y)
info.obj = obj;
bufferlist bl;
encode(info, bl);
- return notify_svc->distribute(normal_name, bl, y);
+ return notify_svc->distribute(dpp, normal_name, bl, y);
}
-int RGWSI_SysObj_Cache::watch_cb(uint64_t notify_id,
+int RGWSI_SysObj_Cache::watch_cb(const DoutPrefixProvider *dpp,
+ uint64_t notify_id,
uint64_t cookie,
uint64_t notifier_id,
bufferlist& bl)
switch (info.op) {
case UPDATE_OBJ:
- cache.put(name, info.obj_info, NULL);
+ cache.put(dpp, name, info.obj_info, NULL);
break;
case REMOVE_OBJ:
- cache.remove(name);
+ cache.remove(dpp, name);
break;
default:
ldout(cct, 0) << "WARNING: got unknown notification op: " << info.op << dendl;
cache.set_enabled(status);
}
-bool RGWSI_SysObj_Cache::chain_cache_entry(std::initializer_list<rgw_cache_entry_info *> cache_info_entries,
+bool RGWSI_SysObj_Cache::chain_cache_entry(const DoutPrefixProvider *dpp,
+ std::initializer_list<rgw_cache_entry_info *> cache_info_entries,
RGWChainedCache::Entry *chained_entry)
{
- return cache.chain_cache_entry(cache_info_entries, chained_entry);
+ return cache.chain_cache_entry(dpp, cache_info_entries, chained_entry);
}
void RGWSI_SysObj_Cache::register_chained_cache(RGWChainedCache *cc)
return -ENOSYS;
}
-RGWSI_SysObj_Cache::ASocketHandler::ASocketHandler(RGWSI_SysObj_Cache *_svc) : svc(_svc)
+RGWSI_SysObj_Cache::ASocketHandler::ASocketHandler(const DoutPrefixProvider *_dpp, RGWSI_SysObj_Cache *_svc) : dpp(_dpp), svc(_svc)
{
hook.reset(new RGWSI_SysObj_Cache_ASocketHook(_svc));
}
int RGWSI_SysObj_Cache::ASocketHandler::call_inspect(const std::string& target, Formatter* f)
{
- if (const auto entry = svc->cache.get(target)) {
+ if (const auto entry = svc->cache.get(dpp, target)) {
f->open_object_section("cache_entry");
f->dump_string("name", target.c_str());
entry->dump(f);
int RGWSI_SysObj_Cache::ASocketHandler::call_erase(const std::string& target)
{
- return svc->cache.remove(target);
+ return svc->cache.remove(dpp, target);
}
int RGWSI_SysObj_Cache::ASocketHandler::call_zap()
int do_start(optional_yield, const DoutPrefixProvider *dpp) override;
void shutdown() override;
- int raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch,
+ int raw_stat(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch,
map<string, bufferlist> *attrs, bufferlist *first_chunk,
RGWObjVersionTracker *objv_tracker,
optional_yield y) override;
- int read(RGWSysObjectCtxBase& obj_ctx,
+ int read(const DoutPrefixProvider *dpp,
+ RGWSysObjectCtxBase& obj_ctx,
RGWSI_SysObj_Obj_GetObjState& read_state,
RGWObjVersionTracker *objv_tracker,
const rgw_raw_obj& obj,
boost::optional<obj_version>,
optional_yield y) override;
- int get_attr(const rgw_raw_obj& obj, const char *name, bufferlist *dest,
+ int get_attr(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const char *name, bufferlist *dest,
optional_yield y) override;
- int set_attrs(const rgw_raw_obj& obj,
+ int set_attrs(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj,
map<string, bufferlist>& attrs,
map<string, bufferlist> *rmattrs,
RGWObjVersionTracker *objv_tracker,
optional_yield y);
- int remove(RGWSysObjectCtxBase& obj_ctx,
+ int remove(const DoutPrefixProvider *dpp,
+ RGWSysObjectCtxBase& obj_ctx,
RGWObjVersionTracker *objv_tracker,
const rgw_raw_obj& obj,
optional_yield y) override;
- int write(const rgw_raw_obj& obj,
+ int write(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj,
real_time *pmtime,
map<std::string, bufferlist>& attrs,
bool exclusive,
real_time set_mtime,
optional_yield y) override;
- int write_data(const rgw_raw_obj& obj,
+ int write_data(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj,
const bufferlist& bl,
bool exclusive,
RGWObjVersionTracker *objv_tracker,
optional_yield y);
- int distribute_cache(const string& normal_name, const rgw_raw_obj& obj,
+ int distribute_cache(const DoutPrefixProvider *dpp, const string& normal_name, const rgw_raw_obj& obj,
ObjectCacheInfo& obj_info, int op,
optional_yield y);
- int watch_cb(uint64_t notify_id,
+ int watch_cb(const DoutPrefixProvider *dpp,
+ uint64_t notify_id,
uint64_t cookie,
uint64_t notifier_id,
bufferlist& bl);
void set_enabled(bool status);
public:
- RGWSI_SysObj_Cache(CephContext *cct) : RGWSI_SysObj_Core(cct), asocket(this) {
+ RGWSI_SysObj_Cache(const DoutPrefixProvider *dpp, CephContext *cct) : RGWSI_SysObj_Core(cct), asocket(dpp, this) {
cache.set_ctx(cct);
}
- bool chain_cache_entry(std::initializer_list<rgw_cache_entry_info *> cache_info_entries,
+ bool chain_cache_entry(const DoutPrefixProvider *dpp,
+ std::initializer_list<rgw_cache_entry_info *> cache_info_entries,
RGWChainedCache::Entry *chained_entry);
void register_chained_cache(RGWChainedCache *cc);
void unregister_chained_cache(RGWChainedCache *cc);
class ASocketHandler {
+ const DoutPrefixProvider *dpp;
RGWSI_SysObj_Cache *svc;
std::unique_ptr<RGWSI_SysObj_Cache_ASocketHook> hook;
public:
- ASocketHandler(RGWSI_SysObj_Cache *_svc);
+ ASocketHandler(const DoutPrefixProvider *dpp, RGWSI_SysObj_Cache *_svc);
~ASocketHandler();
int start();
return iter->second.first;
}
- bool put(RGWSI_SysObj_Cache *svc, const string& key, T *entry,
+ bool put(const DoutPrefixProvider *dpp, RGWSI_SysObj_Cache *svc, const string& key, T *entry,
std::initializer_list<rgw_cache_entry_info *> cache_info_entries) {
if (!svc) {
return false;
Entry chain_entry(this, key, entry);
/* we need the svc cache to call us under its lock to maintain lock ordering */
- return svc->chain_cache_entry(cache_info_entries, &chain_entry);
+ return svc->chain_cache_entry(dpp, cache_info_entries, &chain_entry);
}
void chain_cb(const string& key, void *data) override {
#define dout_subsys ceph_subsys_rgw
-int RGWSI_SysObj_Core_GetObjState::get_rados_obj(RGWSI_RADOS *rados_svc,
+int RGWSI_SysObj_Core_GetObjState::get_rados_obj(const DoutPrefixProvider *dpp,
+ RGWSI_RADOS *rados_svc,
RGWSI_Zone *zone_svc,
const rgw_raw_obj& obj,
RGWSI_RADOS::Obj **pobj)
{
if (!has_rados_obj) {
if (obj.oid.empty()) {
- ldout(rados_svc->ctx(), 0) << "ERROR: obj.oid is empty" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: obj.oid is empty" << dendl;
return -EINVAL;
}
rados_obj = rados_svc->obj(obj);
- int r = rados_obj.open();
+ int r = rados_obj.open(dpp);
if (r < 0) {
return r;
}
return 0;
}
-int RGWSI_SysObj_Core::get_rados_obj(RGWSI_Zone *zone_svc,
+int RGWSI_SysObj_Core::get_rados_obj(const DoutPrefixProvider *dpp,
+ RGWSI_Zone *zone_svc,
const rgw_raw_obj& obj,
RGWSI_RADOS::Obj *pobj)
{
if (obj.oid.empty()) {
- ldout(rados_svc->ctx(), 0) << "ERROR: obj.oid is empty" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: obj.oid is empty" << dendl;
return -EINVAL;
}
*pobj = rados_svc->obj(obj);
- int r = pobj->open();
+ int r = pobj->open(dpp);
if (r < 0) {
return r;
}
s->obj = obj;
- int r = raw_stat(obj, &s->size, &s->mtime, &s->epoch, &s->attrset,
+ int r = raw_stat(dpp, obj, &s->size, &s->mtime, &s->epoch, &s->attrset,
(s->prefetch_data ? &s->data : nullptr), objv_tracker, y);
if (r == -ENOENT) {
s->exists = false;
return ret;
}
-int RGWSI_SysObj_Core::raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch,
+int RGWSI_SysObj_Core::raw_stat(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch,
map<string, bufferlist> *attrs, bufferlist *first_chunk,
RGWObjVersionTracker *objv_tracker,
optional_yield y)
{
RGWSI_RADOS::Obj rados_obj;
- int r = get_rados_obj(zone_svc, obj, &rados_obj);
+ int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj);
if (r < 0) {
return r;
}
op.read(0, cct->_conf->rgw_max_chunk_size, first_chunk, nullptr);
}
bufferlist outbl;
- r = rados_obj.operate(&op, &outbl, y);
+ r = rados_obj.operate(dpp, &op, &outbl, y);
if (epoch) {
*epoch = rados_obj.get_last_version();
return 0;
}
-int RGWSI_SysObj_Core::read(RGWSysObjectCtxBase& obj_ctx,
+int RGWSI_SysObj_Core::read(const DoutPrefixProvider *dpp,
+ RGWSysObjectCtxBase& obj_ctx,
RGWSI_SysObj_Obj_GetObjState& _read_state,
RGWObjVersionTracker *objv_tracker,
const rgw_raw_obj& obj,
objv_tracker->prepare_op_for_read(&op);
}
- ldout(cct, 20) << "rados->read ofs=" << ofs << " len=" << len << dendl;
+ ldpp_dout(dpp, 20) << "rados->read ofs=" << ofs << " len=" << len << dendl;
op.read(ofs, len, bl, nullptr);
map<string, bufferlist> unfiltered_attrset;
}
RGWSI_RADOS::Obj rados_obj;
- int r = get_rados_obj(zone_svc, obj, &rados_obj);
+ int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj);
if (r < 0) {
- ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
+ ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
return r;
}
- r = rados_obj.operate(&op, nullptr, y);
+ r = rados_obj.operate(dpp, &op, nullptr, y);
if (r < 0) {
- ldout(cct, 20) << "rados_obj.operate() r=" << r << " bl.length=" << bl->length() << dendl;
+ ldpp_dout(dpp, 20) << "rados_obj.operate() r=" << r << " bl.length=" << bl->length() << dendl;
return r;
}
- ldout(cct, 20) << "rados_obj.operate() r=" << r << " bl.length=" << bl->length() << dendl;
+ ldpp_dout(dpp, 20) << "rados_obj.operate() r=" << r << " bl.length=" << bl->length() << dendl;
uint64_t op_ver = rados_obj.get_last_version();
if (read_state.last_ver > 0 &&
read_state.last_ver != op_ver) {
- ldout(cct, 5) << "raced with an object write, abort" << dendl;
+ ldpp_dout(dpp, 5) << "raced with an object write, abort" << dendl;
return -ECANCELED;
}
* dest: bufferlist to store the result in
* Returns: 0 on success, -ERR# otherwise.
*/
-int RGWSI_SysObj_Core::get_attr(const rgw_raw_obj& obj,
+int RGWSI_SysObj_Core::get_attr(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj,
const char *name,
bufferlist *dest,
optional_yield y)
{
RGWSI_RADOS::Obj rados_obj;
- int r = get_rados_obj(zone_svc, obj, &rados_obj);
+ int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj);
if (r < 0) {
- ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
+ ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
return r;
}
int rval;
op.getxattr(name, dest, &rval);
- r = rados_obj.operate(&op, nullptr, y);
+ r = rados_obj.operate(dpp, &op, nullptr, y);
if (r < 0)
return r;
return 0;
}
-int RGWSI_SysObj_Core::set_attrs(const rgw_raw_obj& obj,
+int RGWSI_SysObj_Core::set_attrs(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj,
map<string, bufferlist>& attrs,
map<string, bufferlist> *rmattrs,
RGWObjVersionTracker *objv_tracker,
optional_yield y)
{
RGWSI_RADOS::Obj rados_obj;
- int r = get_rados_obj(zone_svc, obj, &rados_obj);
+ int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj);
if (r < 0) {
- ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
+ ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
return r;
}
bufferlist bl;
- r = rados_obj.operate(&op, y);
+ r = rados_obj.operate(dpp, &op, y);
if (r < 0)
return r;
return 0;
}
-int RGWSI_SysObj_Core::omap_get_vals(const rgw_raw_obj& obj,
+int RGWSI_SysObj_Core::omap_get_vals(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj,
const string& marker,
uint64_t count,
std::map<string, bufferlist> *m,
optional_yield y)
{
RGWSI_RADOS::Obj rados_obj;
- int r = get_rados_obj(zone_svc, obj, &rados_obj);
+ int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj);
if (r < 0) {
- ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
+ ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
return r;
}
int rval;
op.omap_get_vals2(start_after, count, &t, &more, &rval);
- r = rados_obj.operate(&op, nullptr, y);
+ r = rados_obj.operate(dpp, &op, nullptr, y);
if (r < 0) {
return r;
}
return 0;
}
-int RGWSI_SysObj_Core::omap_get_all(const rgw_raw_obj& obj,
+int RGWSI_SysObj_Core::omap_get_all(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj,
std::map<string, bufferlist> *m,
optional_yield y)
{
RGWSI_RADOS::Obj rados_obj;
- int r = get_rados_obj(zone_svc, obj, &rados_obj);
+ int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj);
if (r < 0) {
- ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
+ ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
return r;
}
int rval;
op.omap_get_vals2(start_after, count, &t, &more, &rval);
- r = rados_obj.operate(&op, nullptr, y);
+ r = rados_obj.operate(dpp, &op, nullptr, y);
if (r < 0) {
return r;
}
return 0;
}
-int RGWSI_SysObj_Core::omap_set(const rgw_raw_obj& obj, const std::string& key,
+int RGWSI_SysObj_Core::omap_set(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const std::string& key,
bufferlist& bl, bool must_exist,
optional_yield y)
{
RGWSI_RADOS::Obj rados_obj;
- int r = get_rados_obj(zone_svc, obj, &rados_obj);
+ int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj);
if (r < 0) {
- ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
+ ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
return r;
}
- ldout(cct, 15) << "omap_set obj=" << obj << " key=" << key << dendl;
+ ldpp_dout(dpp, 15) << "omap_set obj=" << obj << " key=" << key << dendl;
map<string, bufferlist> m;
m[key] = bl;
if (must_exist)
op.assert_exists();
op.omap_set(m);
- r = rados_obj.operate(&op, y);
+ r = rados_obj.operate(dpp, &op, y);
return r;
}
-int RGWSI_SysObj_Core::omap_set(const rgw_raw_obj& obj,
+int RGWSI_SysObj_Core::omap_set(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj,
const std::map<std::string, bufferlist>& m,
bool must_exist, optional_yield y)
{
RGWSI_RADOS::Obj rados_obj;
- int r = get_rados_obj(zone_svc, obj, &rados_obj);
+ int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj);
if (r < 0) {
- ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
+ ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
return r;
}
if (must_exist)
op.assert_exists();
op.omap_set(m);
- r = rados_obj.operate(&op, y);
+ r = rados_obj.operate(dpp, &op, y);
return r;
}
-int RGWSI_SysObj_Core::omap_del(const rgw_raw_obj& obj, const std::string& key,
+int RGWSI_SysObj_Core::omap_del(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const std::string& key,
optional_yield y)
{
RGWSI_RADOS::Obj rados_obj;
- int r = get_rados_obj(zone_svc, obj, &rados_obj);
+ int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj);
if (r < 0) {
- ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
+ ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
return r;
}
op.omap_rm_keys(k);
- r = rados_obj.operate(&op, y);
+ r = rados_obj.operate(dpp, &op, y);
return r;
}
-int RGWSI_SysObj_Core::notify(const rgw_raw_obj& obj, bufferlist& bl,
+int RGWSI_SysObj_Core::notify(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, bufferlist& bl,
uint64_t timeout_ms, bufferlist *pbl,
optional_yield y)
{
RGWSI_RADOS::Obj rados_obj;
- int r = get_rados_obj(zone_svc, obj, &rados_obj);
+ int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj);
if (r < 0) {
- ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
+ ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
return r;
}
- r = rados_obj.notify(bl, timeout_ms, pbl, y);
+ r = rados_obj.notify(dpp, bl, timeout_ms, pbl, y);
return r;
}
-int RGWSI_SysObj_Core::remove(RGWSysObjectCtxBase& obj_ctx,
+int RGWSI_SysObj_Core::remove(const DoutPrefixProvider *dpp,
+ RGWSysObjectCtxBase& obj_ctx,
RGWObjVersionTracker *objv_tracker,
const rgw_raw_obj& obj,
optional_yield y)
{
RGWSI_RADOS::Obj rados_obj;
- int r = get_rados_obj(zone_svc, obj, &rados_obj);
+ int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj);
if (r < 0) {
- ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
+ ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
return r;
}
}
op.remove();
- r = rados_obj.operate(&op, y);
+ r = rados_obj.operate(dpp, &op, y);
if (r < 0)
return r;
return 0;
}
-int RGWSI_SysObj_Core::write(const rgw_raw_obj& obj,
+int RGWSI_SysObj_Core::write(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj,
real_time *pmtime,
map<std::string, bufferlist>& attrs,
bool exclusive,
optional_yield y)
{
RGWSI_RADOS::Obj rados_obj;
- int r = get_rados_obj(zone_svc, obj, &rados_obj);
+ int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj);
if (r < 0) {
- ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
+ ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
return r;
}
op.setxattr(name.c_str(), bl);
}
- r = rados_obj.operate(&op, y);
+ r = rados_obj.operate(dpp, &op, y);
if (r < 0) {
return r;
}
}
-int RGWSI_SysObj_Core::write_data(const rgw_raw_obj& obj,
+int RGWSI_SysObj_Core::write_data(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj,
const bufferlist& bl,
bool exclusive,
RGWObjVersionTracker *objv_tracker,
optional_yield y)
{
RGWSI_RADOS::Obj rados_obj;
- int r = get_rados_obj(zone_svc, obj, &rados_obj);
+ int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj);
if (r < 0) {
- ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
+ ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl;
return r;
}
objv_tracker->prepare_op_for_write(&op);
}
op.write_full(bl);
- r = rados_obj.operate(&op, y);
+ r = rados_obj.operate(dpp, &op, y);
if (r < 0)
return r;
return 0;
}
-int RGWSI_SysObj_Core::pool_list_prefixed_objs(const rgw_pool& pool, const string& prefix,
+int RGWSI_SysObj_Core::pool_list_prefixed_objs(const DoutPrefixProvider *dpp,
+ const rgw_pool& pool, const string& prefix,
std::function<void(const string&)> cb)
{
bool is_truncated;
RGWAccessListFilterPrefix filter(prefix);
- int r = op.init(string(), &filter);
+ int r = op.init(dpp, string(), &filter);
if (r < 0) {
return r;
}
return 0;
}
-int RGWSI_SysObj_Core::pool_list_objects_init(const rgw_pool& pool,
+int RGWSI_SysObj_Core::pool_list_objects_init(const DoutPrefixProvider *dpp,
+ const rgw_pool& pool,
const string& marker,
const string& prefix,
RGWSI_SysObj::Pool::ListCtx *_ctx)
ctx.pool = rados_svc->pool(pool);
ctx.op = ctx.pool.op();
- int r = ctx.op.init(marker, &ctx.filter);
+ int r = ctx.op.init(dpp, marker, &ctx.filter);
if (r < 0) {
- ldout(cct, 10) << "failed to list objects pool_iterate_begin() returned r=" << r << dendl;
+ ldpp_dout(dpp, 10) << "failed to list objects pool_iterate_begin() returned r=" << r << dendl;
return r;
}
return 0;
rados_svc = _rados_svc;
zone_svc = _zone_svc;
}
- int get_rados_obj(RGWSI_Zone *zone_svc, const rgw_raw_obj& obj, RGWSI_RADOS::Obj *pobj);
+ int get_rados_obj(const DoutPrefixProvider *dpp, RGWSI_Zone *zone_svc, const rgw_raw_obj& obj, RGWSI_RADOS::Obj *pobj);
- virtual int raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch,
+ virtual int raw_stat(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, uint64_t *psize,
+ real_time *pmtime, uint64_t *epoch,
map<string, bufferlist> *attrs, bufferlist *first_chunk,
RGWObjVersionTracker *objv_tracker,
optional_yield y);
- virtual int read(RGWSysObjectCtxBase& obj_ctx,
+ virtual int read(const DoutPrefixProvider *dpp,
+ RGWSysObjectCtxBase& obj_ctx,
RGWSI_SysObj_Obj_GetObjState& read_state,
RGWObjVersionTracker *objv_tracker,
const rgw_raw_obj& obj,
boost::optional<obj_version>,
optional_yield y);
- virtual int remove(RGWSysObjectCtxBase& obj_ctx,
+ virtual int remove(const DoutPrefixProvider *dpp,
+ RGWSysObjectCtxBase& obj_ctx,
RGWObjVersionTracker *objv_tracker,
const rgw_raw_obj& obj,
optional_yield y);
- virtual int write(const rgw_raw_obj& obj,
+ virtual int write(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj,
real_time *pmtime,
map<std::string, bufferlist>& attrs,
bool exclusive,
real_time set_mtime,
optional_yield y);
- virtual int write_data(const rgw_raw_obj& obj,
+ virtual int write_data(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj,
const bufferlist& bl,
bool exclusive,
RGWObjVersionTracker *objv_tracker,
optional_yield y);
- virtual int get_attr(const rgw_raw_obj& obj, const char *name, bufferlist *dest,
+ virtual int get_attr(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj,
+ const char *name, bufferlist *dest,
optional_yield y);
- virtual int set_attrs(const rgw_raw_obj& obj,
+ virtual int set_attrs(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj,
map<string, bufferlist>& attrs,
map<string, bufferlist> *rmattrs,
RGWObjVersionTracker *objv_tracker,
optional_yield y);
- virtual int omap_get_all(const rgw_raw_obj& obj, std::map<string, bufferlist> *m,
+ virtual int omap_get_all(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, std::map<string, bufferlist> *m,
optional_yield y);
- virtual int omap_get_vals(const rgw_raw_obj& obj,
+ virtual int omap_get_vals(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj,
const string& marker,
uint64_t count,
std::map<string, bufferlist> *m,
bool *pmore,
optional_yield y);
- virtual int omap_set(const rgw_raw_obj& obj, const std::string& key,
+ virtual int omap_set(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj, const std::string& key,
bufferlist& bl, bool must_exist,
optional_yield y);
- virtual int omap_set(const rgw_raw_obj& obj,
+ virtual int omap_set(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj,
const map<std::string, bufferlist>& m, bool must_exist,
optional_yield y);
- virtual int omap_del(const rgw_raw_obj& obj, const std::string& key,
+ virtual int omap_del(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const std::string& key,
optional_yield y);
- virtual int notify(const rgw_raw_obj& obj, bufferlist& bl,
+ virtual int notify(const DoutPrefixProvider *dpp,
+ const rgw_raw_obj& obj, bufferlist& bl,
uint64_t timeout_ms, bufferlist *pbl,
optional_yield y);
- virtual int pool_list_prefixed_objs(const rgw_pool& pool,
+ virtual int pool_list_prefixed_objs(const DoutPrefixProvider *dpp,
+ const rgw_pool& pool,
const string& prefix,
std::function<void(const string&)> cb);
- virtual int pool_list_objects_init(const rgw_pool& pool,
+ virtual int pool_list_objects_init(const DoutPrefixProvider *dpp,
+ const rgw_pool& pool,
const std::string& marker,
const std::string& prefix,
RGWSI_SysObj::Pool::ListCtx *ctx);
RGWSI_SysObj_Core_GetObjState() {}
- int get_rados_obj(RGWSI_RADOS *rados_svc,
+ int get_rados_obj(const DoutPrefixProvider *dpp,
+ RGWSI_RADOS *rados_svc,
RGWSI_Zone *zone_svc,
const rgw_raw_obj& obj,
RGWSI_RADOS::Obj **pobj);
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
- virtual int add_bucket(RGWSI_MetaBackend::Context *ctx,
+ virtual int add_bucket(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user,
const rgw_bucket& bucket,
ceph::real_time creation_time,
optional_yield y) = 0;
- virtual int remove_bucket(RGWSI_MetaBackend::Context *ctx,
+ virtual int remove_bucket(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user,
const rgw_bucket& _bucket, optional_yield) = 0;
- virtual int list_buckets(RGWSI_MetaBackend::Context *ctx,
+ virtual int list_buckets(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user,
const string& marker,
const string& end_marker,
bool *is_truncated,
optional_yield y) = 0;
- virtual int flush_bucket_stats(RGWSI_MetaBackend::Context *ctx,
+ virtual int flush_bucket_stats(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user,
const RGWBucketEnt& ent, optional_yield y) = 0;
- virtual int complete_flush_stats(RGWSI_MetaBackend::Context *ctx,
+ virtual int complete_flush_stats(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx,
const rgw_user& user, optional_yield y) = 0;
- virtual int reset_bucket_stats(RGWSI_MetaBackend::Context *ctx,
+ virtual int reset_bucket_stats(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user,
optional_yield y) = 0;
- virtual int read_stats(RGWSI_MetaBackend::Context *ctx,
+ virtual int read_stats(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user, RGWStorageStats *stats,
ceph::real_time *last_stats_sync, /* last time a full stats sync completed */
ceph::real_time *last_stats_update,
optional_yield y) = 0; /* last time a stats update was done */
- virtual int read_stats_async(RGWSI_MetaBackend::Context *ctx,
+ virtual int read_stats_async(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx,
const rgw_user& user, RGWGetUserStats_CB *cb) = 0;
};
if (!info.user_email.empty()) {
if (!old_info ||
old_info->user_email.compare(info.user_email) != 0) { /* only if new index changed */
- ret = rgw_put_system_obj(obj_ctx, svc.zone->get_zone_params().user_email_pool, info.user_email,
+ ret = rgw_put_system_obj(dpp, obj_ctx, svc.zone->get_zone_params().user_email_pool, info.user_email,
link_bl, exclusive, NULL, real_time(), y);
if (ret < 0)
return ret;
if (old_info && old_info->access_keys.count(iter->first) != 0 && !renamed)
continue;
- ret = rgw_put_system_obj(obj_ctx, svc.zone->get_zone_params().user_keys_pool, k.id,
+ ret = rgw_put_system_obj(dpp, obj_ctx, svc.zone->get_zone_params().user_keys_pool, k.id,
link_bl, exclusive, NULL, real_time(), y);
if (ret < 0)
return ret;
if (old_info && old_info->swift_keys.count(siter->first) != 0 && !renamed)
continue;
- ret = rgw_put_system_obj(obj_ctx, svc.zone->get_zone_params().user_swift_pool, k.id,
+ ret = rgw_put_system_obj(dpp, obj_ctx, svc.zone->get_zone_params().user_swift_pool, k.id,
link_bl, exclusive, NULL, real_time(), y);
if (ret < 0)
return ret;
if (!old_info.user_email.empty() &&
old_info.user_email != new_info.user_email) {
- ret = svc.user->remove_email_index(ctx, old_info.user_email, y);
+ ret = svc.user->remove_email_index(dpp, ctx, old_info.user_email, y);
if (ret < 0 && ret != -ENOENT) {
set_err_msg("ERROR: could not remove index for email " + old_info.user_email);
return ret;
for ([[maybe_unused]] const auto& [name, access_key] : old_info.access_keys) {
if (!new_info.access_keys.count(access_key.id)) {
- ret = svc.user->remove_key_index(ctx, access_key, y);
+ ret = svc.user->remove_key_index(dpp, ctx, access_key, y);
if (ret < 0 && ret != -ENOENT) {
set_err_msg("ERROR: could not remove index for key " + access_key.id);
return ret;
const auto& swift_key = old_iter->second;
auto new_iter = new_info.swift_keys.find(swift_key.id);
if (new_iter == new_info.swift_keys.end()) {
- ret = svc.user->remove_swift_name_index(ctx, swift_key.id, y);
+ ret = svc.user->remove_swift_name_index(dpp, ctx, swift_key.id, y);
if (ret < 0 && ret != -ENOENT) {
set_err_msg("ERROR: could not remove index for swift_name " + swift_key.id);
return ret;
return 0;
}
-int RGWSI_User_RADOS::remove_key_index(RGWSI_MetaBackend::Context *_ctx,
+int RGWSI_User_RADOS::remove_key_index(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *_ctx,
const RGWAccessKey& access_key,
optional_yield y)
{
RGWSI_MetaBackend_SObj::Context_SObj *ctx = static_cast<RGWSI_MetaBackend_SObj::Context_SObj *>(_ctx);
rgw_raw_obj obj(svc.zone->get_zone_params().user_keys_pool, access_key.id);
auto sysobj = ctx->obj_ctx->get_obj(obj);
- return sysobj.wop().remove(y);
+ return sysobj.wop().remove(dpp, y);
}
-int RGWSI_User_RADOS::remove_email_index(RGWSI_MetaBackend::Context *_ctx,
+int RGWSI_User_RADOS::remove_email_index(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *_ctx,
const string& email,
optional_yield y)
{
RGWSI_MetaBackend_SObj::Context_SObj *ctx = static_cast<RGWSI_MetaBackend_SObj::Context_SObj *>(_ctx);
rgw_raw_obj obj(svc.zone->get_zone_params().user_email_pool, email);
auto sysobj = ctx->obj_ctx->get_obj(obj);
- return sysobj.wop().remove(y);
+ return sysobj.wop().remove(dpp, y);
}
-int RGWSI_User_RADOS::remove_swift_name_index(RGWSI_MetaBackend::Context *_ctx, const string& swift_name,
+int RGWSI_User_RADOS::remove_swift_name_index(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *_ctx, const string& swift_name,
optional_yield y)
{
RGWSI_MetaBackend_SObj::Context_SObj *ctx = static_cast<RGWSI_MetaBackend_SObj::Context_SObj *>(_ctx);
rgw_raw_obj obj(svc.zone->get_zone_params().user_swift_pool, swift_name);
auto sysobj = ctx->obj_ctx->get_obj(obj);
- return sysobj.wop().remove(y);
+ return sysobj.wop().remove(dpp, y);
}
/**
{
int ret;
- auto cct = svc.meta_be->ctx();
-
auto kiter = info.access_keys.begin();
for (; kiter != info.access_keys.end(); ++kiter) {
ldpp_dout(dpp, 10) << "removing key index: " << kiter->first << dendl;
- ret = remove_key_index(_ctx, kiter->second, y);
+ ret = remove_key_index(dpp, _ctx, kiter->second, y);
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << "ERROR: could not remove " << kiter->first << " (access key object), should be fixed (err=" << ret << ")" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not remove " << kiter->first << " (access key object), should be fixed (err=" << ret << ")" << dendl;
return ret;
}
}
auto& k = siter->second;
ldpp_dout(dpp, 10) << "removing swift subuser index: " << k.id << dendl;
/* check if swift mapping exists */
- ret = remove_swift_name_index(_ctx, k.id, y);
+ ret = remove_swift_name_index(dpp, _ctx, k.id, y);
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << "ERROR: could not remove " << k.id << " (swift name object), should be fixed (err=" << ret << ")" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not remove " << k.id << " (swift name object), should be fixed (err=" << ret << ")" << dendl;
return ret;
}
}
ldpp_dout(dpp, 10) << "removing email index: " << info.user_email << dendl;
- ret = remove_email_index(_ctx, info.user_email, y);
+ ret = remove_email_index(dpp, _ctx, info.user_email, y);
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << "ERROR: could not remove email index object for "
+ ldpp_dout(dpp, 0) << "ERROR: could not remove email index object for "
<< info.user_email << ", should be fixed (err=" << ret << ")" << dendl;
return ret;
}
ldpp_dout(dpp, 10) << "removing user buckets index" << dendl;
RGWSI_MetaBackend_SObj::Context_SObj *ctx = static_cast<RGWSI_MetaBackend_SObj::Context_SObj *>(_ctx);
auto sysobj = ctx->obj_ctx->get_obj(uid_bucks);
- ret = sysobj.wop().remove(y);
+ ret = sysobj.wop().remove(dpp, y);
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << "ERROR: could not remove " << info.user_id << ":" << uid_bucks << ", should be fixed (err=" << ret << ")" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not remove " << info.user_id << ":" << uid_bucks << ", should be fixed (err=" << ret << ")" << dendl;
return ret;
}
return -EIO;
}
- uinfo_cache->put(svc.cache, cache_key, &e, { &cache_info });
+ uinfo_cache->put(dpp, svc.cache, cache_key, &e, { &cache_info });
*info = e.info;
if (objv_tracker)
info, objv_tracker, pmtime, y, dpp);
}
-int RGWSI_User_RADOS::cls_user_update_buckets(rgw_raw_obj& obj, list<cls_user_bucket_entry>& entries, bool add, optional_yield y)
+int RGWSI_User_RADOS::cls_user_update_buckets(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, list<cls_user_bucket_entry>& entries, bool add, optional_yield y)
{
auto rados_obj = svc.rados->obj(obj);
- int r = rados_obj.open();
+ int r = rados_obj.open(dpp);
if (r < 0) {
return r;
}
librados::ObjectWriteOperation op;
cls_user_set_buckets(op, entries, add);
- r = rados_obj.operate(&op, y);
+ r = rados_obj.operate(dpp, &op, y);
if (r < 0) {
return r;
}
return 0;
}
-int RGWSI_User_RADOS::cls_user_add_bucket(rgw_raw_obj& obj, const cls_user_bucket_entry& entry, optional_yield y)
+int RGWSI_User_RADOS::cls_user_add_bucket(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, const cls_user_bucket_entry& entry, optional_yield y)
{
list<cls_user_bucket_entry> l;
l.push_back(entry);
- return cls_user_update_buckets(obj, l, true, y);
+ return cls_user_update_buckets(dpp, obj, l, true, y);
}
-int RGWSI_User_RADOS::cls_user_remove_bucket(rgw_raw_obj& obj, const cls_user_bucket& bucket, optional_yield y)
+int RGWSI_User_RADOS::cls_user_remove_bucket(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, const cls_user_bucket& bucket, optional_yield y)
{
auto rados_obj = svc.rados->obj(obj);
- int r = rados_obj.open();
+ int r = rados_obj.open(dpp);
if (r < 0) {
return r;
}
librados::ObjectWriteOperation op;
::cls_user_remove_bucket(op, bucket);
- r = rados_obj.operate(&op, y);
+ r = rados_obj.operate(dpp, &op, y);
if (r < 0)
return r;
return 0;
}
-int RGWSI_User_RADOS::add_bucket(RGWSI_MetaBackend::Context *ctx,
+int RGWSI_User_RADOS::add_bucket(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user,
const rgw_bucket& bucket,
ceph::real_time creation_time,
new_bucket.creation_time = creation_time;
rgw_raw_obj obj = get_buckets_obj(user);
- ret = cls_user_add_bucket(obj, new_bucket, y);
+ ret = cls_user_add_bucket(dpp, obj, new_bucket, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: error adding bucket to user: ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: error adding bucket to user: ret=" << ret << dendl;
return ret;
}
}
-int RGWSI_User_RADOS::remove_bucket(RGWSI_MetaBackend::Context *ctx,
+int RGWSI_User_RADOS::remove_bucket(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user,
const rgw_bucket& _bucket,
optional_yield y)
cls_user_bucket bucket;
bucket.name = _bucket.name;
rgw_raw_obj obj = get_buckets_obj(user);
- int ret = cls_user_remove_bucket(obj, bucket, y);
+ int ret = cls_user_remove_bucket(dpp, obj, bucket, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: error removing bucket from user: ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: error removing bucket from user: ret=" << ret << dendl;
}
return 0;
}
-int RGWSI_User_RADOS::cls_user_flush_bucket_stats(rgw_raw_obj& user_obj,
+int RGWSI_User_RADOS::cls_user_flush_bucket_stats(const DoutPrefixProvider *dpp,
+ rgw_raw_obj& user_obj,
const RGWBucketEnt& ent, optional_yield y)
{
cls_user_bucket_entry entry;
list<cls_user_bucket_entry> entries;
entries.push_back(entry);
- int r = cls_user_update_buckets(user_obj, entries, false, y);
+ int r = cls_user_update_buckets(dpp, user_obj, entries, false, y);
if (r < 0) {
- ldout(cct, 20) << "cls_user_update_buckets() returned " << r << dendl;
+ ldpp_dout(dpp, 20) << "cls_user_update_buckets() returned " << r << dendl;
return r;
}
return 0;
}
-int RGWSI_User_RADOS::cls_user_list_buckets(rgw_raw_obj& obj,
+int RGWSI_User_RADOS::cls_user_list_buckets(const DoutPrefixProvider *dpp,
+ rgw_raw_obj& obj,
const string& in_marker,
const string& end_marker,
const int max_entries,
optional_yield y)
{
auto rados_obj = svc.rados->obj(obj);
- int r = rados_obj.open();
+ int r = rados_obj.open(dpp);
if (r < 0) {
return r;
}
cls_user_bucket_list(op, in_marker, end_marker, max_entries, entries, out_marker, truncated, &rc);
bufferlist ibl;
- r = rados_obj.operate(&op, &ibl, y);
+ r = rados_obj.operate(dpp, &op, &ibl, y);
if (r < 0)
return r;
if (rc < 0)
return 0;
}
-int RGWSI_User_RADOS::list_buckets(RGWSI_MetaBackend::Context *ctx,
+int RGWSI_User_RADOS::list_buckets(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user,
const string& marker,
const string& end_marker,
buckets->clear();
if (user.id == RGW_USER_ANON_ID) {
- ldout(cct, 20) << "RGWSI_User_RADOS::list_buckets(): anonymous user" << dendl;
+ ldpp_dout(dpp, 20) << "RGWSI_User_RADOS::list_buckets(): anonymous user" << dendl;
*is_truncated = false;
return 0;
}
do {
std::list<cls_user_bucket_entry> entries;
- ret = cls_user_list_buckets(obj, m, end_marker, max - total, entries, &m, &truncated, y);
+ ret = cls_user_list_buckets(dpp, obj, m, end_marker, max - total, entries, &m, &truncated, y);
if (ret == -ENOENT) {
ret = 0;
}
return 0;
}
-int RGWSI_User_RADOS::flush_bucket_stats(RGWSI_MetaBackend::Context *ctx,
+int RGWSI_User_RADOS::flush_bucket_stats(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user,
const RGWBucketEnt& ent,
optional_yield y)
{
rgw_raw_obj obj = get_buckets_obj(user);
- return cls_user_flush_bucket_stats(obj, ent, y);
+ return cls_user_flush_bucket_stats(dpp, obj, ent, y);
}
-int RGWSI_User_RADOS::reset_bucket_stats(RGWSI_MetaBackend::Context *ctx,
+int RGWSI_User_RADOS::reset_bucket_stats(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user,
optional_yield y)
{
- return cls_user_reset_stats(user, y);
+ return cls_user_reset_stats(dpp, user, y);
}
-int RGWSI_User_RADOS::cls_user_reset_stats(const rgw_user& user, optional_yield y)
+int RGWSI_User_RADOS::cls_user_reset_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y)
{
rgw_raw_obj obj = get_buckets_obj(user);
auto rados_obj = svc.rados->obj(obj);
- int rval, r = rados_obj.open();
+ int rval, r = rados_obj.open(dpp);
if (r < 0) {
return r;
}
encode(call, in);
op.exec("user", "reset_user_stats2", in, &out, &rval);
- r = rados_obj.operate(&op, y, librados::OPERATION_RETURNVEC);
+ r = rados_obj.operate(dpp, &op, y, librados::OPERATION_RETURNVEC);
if (r < 0) {
return r;
}
return rval;
}
-int RGWSI_User_RADOS::complete_flush_stats(RGWSI_MetaBackend::Context *ctx,
+int RGWSI_User_RADOS::complete_flush_stats(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user, optional_yield y)
{
rgw_raw_obj obj = get_buckets_obj(user);
auto rados_obj = svc.rados->obj(obj);
- int r = rados_obj.open();
+ int r = rados_obj.open(dpp);
if (r < 0) {
return r;
}
librados::ObjectWriteOperation op;
::cls_user_complete_stats_sync(op);
- return rados_obj.operate(&op, y);
+ return rados_obj.operate(dpp, &op, y);
}
-int RGWSI_User_RADOS::cls_user_get_header(const rgw_user& user, cls_user_header *header,
+int RGWSI_User_RADOS::cls_user_get_header(const DoutPrefixProvider *dpp,
+ const rgw_user& user, cls_user_header *header,
optional_yield y)
{
rgw_raw_obj obj = get_buckets_obj(user);
auto rados_obj = svc.rados->obj(obj);
- int r = rados_obj.open();
+ int r = rados_obj.open(dpp);
if (r < 0) {
return r;
}
bufferlist ibl;
librados::ObjectReadOperation op;
::cls_user_get_header(op, header, &rc);
- return rados_obj.operate(&op, &ibl, y);
+ return rados_obj.operate(dpp, &op, &ibl, y);
}
-int RGWSI_User_RADOS::cls_user_get_header_async(const string& user_str, RGWGetUserHeader_CB *cb)
+int RGWSI_User_RADOS::cls_user_get_header_async(const DoutPrefixProvider *dpp, const string& user_str, RGWGetUserHeader_CB *cb)
{
rgw_raw_obj obj = get_buckets_obj(rgw_user(user_str));
auto rados_obj = svc.rados->obj(obj);
- int r = rados_obj.open();
+ int r = rados_obj.open(dpp);
if (r < 0) {
return r;
}
return 0;
}
-int RGWSI_User_RADOS::read_stats(RGWSI_MetaBackend::Context *ctx,
+int RGWSI_User_RADOS::read_stats(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user, RGWStorageStats *stats,
ceph::real_time *last_stats_sync,
ceph::real_time *last_stats_update,
string user_str = user.to_str();
cls_user_header header;
- int r = cls_user_get_header(rgw_user(user_str), &header, y);
+ int r = cls_user_get_header(dpp, rgw_user(user_str), &header, y);
if (r < 0)
return r;
}
};
-int RGWSI_User_RADOS::read_stats_async(RGWSI_MetaBackend::Context *ctx,
+int RGWSI_User_RADOS::read_stats_async(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx,
const rgw_user& user, RGWGetUserStats_CB *_cb)
{
string user_str = user.to_str();
RGWGetUserStatsContext *cb = new RGWGetUserStatsContext(_cb);
- int r = cls_user_get_header_async(user_str, cb);
+ int r = cls_user_get_header_async(dpp, user_str, cb);
if (r < 0) {
_cb->put();
delete cb;
int remove_uid_index(RGWSI_MetaBackend::Context *ctx, const RGWUserInfo& user_info, RGWObjVersionTracker *objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp);
- int remove_key_index(RGWSI_MetaBackend::Context *ctx, const RGWAccessKey& access_key, optional_yield y);
- int remove_email_index(RGWSI_MetaBackend::Context *ctx, const string& email, optional_yield y);
- int remove_swift_name_index(RGWSI_MetaBackend::Context *ctx, const string& swift_name, optional_yield y);
+ int remove_key_index(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const RGWAccessKey& access_key, optional_yield y);
+ int remove_email_index(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const string& email, optional_yield y);
+ int remove_swift_name_index(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const string& swift_name, optional_yield y);
/* admin management */
- int cls_user_update_buckets(rgw_raw_obj& obj, list<cls_user_bucket_entry>& entries, bool add, optional_yield y);
- int cls_user_add_bucket(rgw_raw_obj& obj, const cls_user_bucket_entry& entry, optional_yield y);
- int cls_user_remove_bucket(rgw_raw_obj& obj, const cls_user_bucket& bucket, optional_yield y);
+ int cls_user_update_buckets(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, list<cls_user_bucket_entry>& entries, bool add, optional_yield y);
+ int cls_user_add_bucket(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, const cls_user_bucket_entry& entry, optional_yield y);
+ int cls_user_remove_bucket(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, const cls_user_bucket& bucket, optional_yield y);
/* quota stats */
- int cls_user_flush_bucket_stats(rgw_raw_obj& user_obj,
+ int cls_user_flush_bucket_stats(const DoutPrefixProvider *dpp, rgw_raw_obj& user_obj,
const RGWBucketEnt& ent, optional_yield y);
- int cls_user_list_buckets(rgw_raw_obj& obj,
+ int cls_user_list_buckets(const DoutPrefixProvider *dpp,
+ rgw_raw_obj& obj,
const string& in_marker,
const string& end_marker,
const int max_entries,
bool * const truncated,
optional_yield y);
- int cls_user_reset_stats(const rgw_user& user, optional_yield y);
- int cls_user_get_header(const rgw_user& user, cls_user_header *header, optional_yield y);
- int cls_user_get_header_async(const string& user, RGWGetUserHeader_CB *cb);
+ int cls_user_reset_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y);
+ int cls_user_get_header(const DoutPrefixProvider *dpp, const rgw_user& user, cls_user_header *header, optional_yield y);
+ int cls_user_get_header_async(const DoutPrefixProvider *dpp, const string& user, RGWGetUserHeader_CB *cb);
int do_start(optional_yield, const DoutPrefixProvider *dpp) override;
public:
/* user buckets directory */
- int add_bucket(RGWSI_MetaBackend::Context *ctx,
+ int add_bucket(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user,
const rgw_bucket& bucket,
ceph::real_time creation_time,
optional_yield y) override;
- int remove_bucket(RGWSI_MetaBackend::Context *ctx,
+ int remove_bucket(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user,
const rgw_bucket& _bucket,
optional_yield y) override;
- int list_buckets(RGWSI_MetaBackend::Context *ctx,
+ int list_buckets(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user,
const string& marker,
const string& end_marker,
optional_yield y) override;
/* quota related */
- int flush_bucket_stats(RGWSI_MetaBackend::Context *ctx,
+ int flush_bucket_stats(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user,
const RGWBucketEnt& ent, optional_yield y) override;
- int complete_flush_stats(RGWSI_MetaBackend::Context *ctx,
+ int complete_flush_stats(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user, optional_yield y) override;
- int reset_bucket_stats(RGWSI_MetaBackend::Context *ctx,
+ int reset_bucket_stats(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user,
optional_yield y) override;
- int read_stats(RGWSI_MetaBackend::Context *ctx,
+ int read_stats(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
const rgw_user& user, RGWStorageStats *stats,
ceph::real_time *last_stats_sync, /* last time a full stats sync completed */
ceph::real_time *last_stats_update,
optional_yield y) override; /* last time a stats update was done */
- int read_stats_async(RGWSI_MetaBackend::Context *ctx,
+ int read_stats_async(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx,
const rgw_user& user, RGWGetUserStats_CB *cb) override;
};
return ret;
}
- ret = realm->init(cct, sysobj_svc, y);
+ ret = realm->init(dpp, cct, sysobj_svc, y);
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(dpp, 0) << "failed reading realm info: ret "<< ret << " " << cpp_strerror(-ret) << dendl;
return ret;
} else if (ret != -ENOENT) {
ldpp_dout(dpp, 20) << "realm " << realm->get_name() << " " << realm->get_id() << dendl;
- ret = current_period->init(cct, sysobj_svc, realm->get_id(), y,
+ ret = current_period->init(dpp, cct, sysobj_svc, realm->get_id(), y,
realm->get_name());
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(dpp, 0) << "failed reading current period info: " << " " << cpp_strerror(-ret) << dendl;
bool zg_initialized = false;
if (!current_period->get_id().empty()) {
- ret = init_zg_from_period(&zg_initialized, y);
+ ret = init_zg_from_period(dpp, &zg_initialized, y);
if (ret < 0) {
return ret;
}
bool creating_defaults = false;
bool using_local = (!zg_initialized);
if (using_local) {
- ldout(cct, 10) << " cannot find current period zonegroup using local zonegroup" << dendl;
+ ldpp_dout(dpp, 10) << " cannot find current period zonegroup using local zonegroup" << dendl;
ret = init_zg_from_local(dpp, &creating_defaults, y);
if (ret < 0) {
return ret;
}
// read period_config into current_period
auto& period_config = current_period->get_config();
- ret = period_config.read(sysobj_svc, zonegroup->realm_id, y);
+ ret = period_config.read(dpp, sysobj_svc, zonegroup->realm_id, y);
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << "ERROR: failed to read period config: "
+ ldpp_dout(dpp, 0) << "ERROR: failed to read period config: "
<< cpp_strerror(ret) << dendl;
return ret;
}
}
- ldout(cct, 10) << "Cannot find current period zone using local zone" << dendl;
+ ldpp_dout(dpp, 10) << "Cannot find current period zone using local zone" << dendl;
if (creating_defaults && cct->_conf->rgw_zone.empty()) {
- ldout(cct, 10) << " Using default name "<< default_zone_name << dendl;
+ ldpp_dout(dpp, 10) << " Using default name "<< default_zone_name << dendl;
zone_params->set_name(default_zone_name);
}
- ret = zone_params->init(cct, sysobj_svc, y);
+ ret = zone_params->init(dpp, cct, sysobj_svc, y);
if (ret < 0 && ret != -ENOENT) {
- lderr(cct) << "failed reading zone info: ret "<< ret << " " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, -1) << "failed reading zone info: ret "<< ret << " " << cpp_strerror(-ret) << dendl;
return ret;
}
auto zone_iter = zonegroup->zones.find(zone_params->get_id());
if (zone_iter == zonegroup->zones.end()) {
if (using_local) {
- lderr(cct) << "Cannot find zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << ")" << dendl;
+ ldpp_dout(dpp, -1) << "Cannot find zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << ")" << dendl;
return -EINVAL;
}
- ldout(cct, 1) << "Cannot find zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << "), switching to local zonegroup configuration" << dendl;
+ ldpp_dout(dpp, 1) << "Cannot find zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << "), switching to local zonegroup configuration" << dendl;
ret = init_zg_from_local(dpp, &creating_defaults, y);
if (ret < 0) {
return ret;
}
if (zone_iter != zonegroup->zones.end()) {
*zone_public_config = zone_iter->second;
- ldout(cct, 20) << "zone " << zone_params->get_name() << " found" << dendl;
+ ldpp_dout(dpp, 20) << "zone " << zone_params->get_name() << " found" << dendl;
} else {
- lderr(cct) << "Cannot find zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << ")" << dendl;
+ ldpp_dout(dpp, -1) << "Cannot find zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << ")" << dendl;
return -EINVAL;
}
for (auto ziter : zonegroup->zones) {
auto zone_handler = std::make_shared<RGWBucketSyncPolicyHandler>(this, sync_modules_svc, bucket_sync_svc, ziter.second.id);
- ret = zone_handler->init(y);
+ ret = zone_handler->init(dpp, y);
if (ret < 0) {
- lderr(cct) << "ERROR: could not initialize zone policy handler for zone=" << ziter.second.name << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: could not initialize zone policy handler for zone=" << ziter.second.name << dendl;
return ret;
}
sync_policy_handlers[ziter.second.id] = zone_handler;
auto sync_modules = sync_modules_svc->get_manager();
RGWSyncModuleRef sm;
if (!sync_modules->get_module(zone_public_config->tier_type, &sm)) {
- lderr(cct) << "ERROR: tier type not found: " << zone_public_config->tier_type << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: tier type not found: " << zone_public_config->tier_type << dendl;
return -EINVAL;
}
}
if (zone_by_id.find(zone_id()) == zone_by_id.end()) {
- ldout(cct, 0) << "WARNING: could not find zone config in zonegroup for local zone (" << zone_id() << "), will use defaults" << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: could not find zone config in zonegroup for local zone (" << zone_id() << "), will use defaults" << dendl;
}
for (const auto& ziter : zonegroup->zones) {
continue;
}
if (z.endpoints.empty()) {
- ldout(cct, 0) << "WARNING: can't generate connection for zone " << z.id << " id " << z.name << ": no endpoints defined" << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: can't generate connection for zone " << z.id << " id " << z.name << ": no endpoints defined" << dendl;
continue;
}
- ldout(cct, 20) << "generating connection object for zone " << z.name << " id " << z.id << dendl;
+ ldpp_dout(dpp, 20) << "generating connection object for zone " << z.name << " id " << z.id << dendl;
RGWRESTConn *conn = new RGWRESTConn(cct, this, z.id, z.endpoints, zonegroup->api_name);
zone_conn_map[id] = conn;
zone_data_notify_to_map[id] = conn;
}
} else {
- ldout(cct, 20) << "NOTICE: not syncing to/from zone " << z.name << " id " << z.id << dendl;
+ ldpp_dout(dpp, 20) << "NOTICE: not syncing to/from zone " << z.name << " id " << z.id << dendl;
}
}
- ldout(cct, 20) << "started zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() <<
+ ldpp_dout(dpp, 20) << "started zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() <<
") with tier type = " << zone_public_config->tier_type << dendl;
return 0;
}
}
-int RGWSI_Zone::list_regions(list<string>& regions)
+int RGWSI_Zone::list_regions(const DoutPrefixProvider *dpp, list<string>& regions)
{
RGWZoneGroup zonegroup;
RGWSI_SysObj::Pool syspool = sysobj_svc->get_pool(zonegroup.get_pool(cct));
- return syspool.list_prefixed_objs(region_info_oid_prefix, &regions);
+ return syspool.list_prefixed_objs(dpp, region_info_oid_prefix, &regions);
}
-int RGWSI_Zone::list_zonegroups(list<string>& zonegroups)
+int RGWSI_Zone::list_zonegroups(const DoutPrefixProvider *dpp, list<string>& zonegroups)
{
RGWZoneGroup zonegroup;
RGWSI_SysObj::Pool syspool = sysobj_svc->get_pool(zonegroup.get_pool(cct));
- return syspool.list_prefixed_objs(zonegroup_names_oid_prefix, &zonegroups);
+ return syspool.list_prefixed_objs(dpp, zonegroup_names_oid_prefix, &zonegroups);
}
-int RGWSI_Zone::list_zones(list<string>& zones)
+int RGWSI_Zone::list_zones(const DoutPrefixProvider *dpp, list<string>& zones)
{
RGWZoneParams zoneparams;
RGWSI_SysObj::Pool syspool = sysobj_svc->get_pool(zoneparams.get_pool(cct));
- return syspool.list_prefixed_objs(zone_names_oid_prefix, &zones);
+ return syspool.list_prefixed_objs(dpp, zone_names_oid_prefix, &zones);
}
-int RGWSI_Zone::list_realms(list<string>& realms)
+int RGWSI_Zone::list_realms(const DoutPrefixProvider *dpp, list<string>& realms)
{
RGWRealm realm(cct, sysobj_svc);
RGWSI_SysObj::Pool syspool = sysobj_svc->get_pool(realm.get_pool(cct));
- return syspool.list_prefixed_objs(realm_names_oid_prefix, &realms);
+ return syspool.list_prefixed_objs(dpp, realm_names_oid_prefix, &realms);
}
-int RGWSI_Zone::list_periods(list<string>& periods)
+int RGWSI_Zone::list_periods(const DoutPrefixProvider *dpp, list<string>& periods)
{
RGWPeriod period;
list<string> raw_periods;
RGWSI_SysObj::Pool syspool = sysobj_svc->get_pool(period.get_pool(cct));
- int ret = syspool.list_prefixed_objs(period.get_info_oid_prefix(), &raw_periods);
+ int ret = syspool.list_prefixed_objs(dpp, period.get_info_oid_prefix(), &raw_periods);
if (ret < 0) {
return ret;
}
}
-int RGWSI_Zone::list_periods(const string& current_period, list<string>& periods, optional_yield y)
+int RGWSI_Zone::list_periods(const DoutPrefixProvider *dpp, const string& current_period, list<string>& periods, optional_yield y)
{
int ret = 0;
string period_id = current_period;
while(!period_id.empty()) {
RGWPeriod period(period_id);
- ret = period.init(cct, sysobj_svc, y);
+ ret = period.init(dpp, cct, sysobj_svc, y);
if (ret < 0) {
return ret;
}
RGWSysObjectCtx obj_ctx = sysobj_svc->init_obj_ctx();
RGWSysObj sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid));
- int ret = sysobj.rop().read(&bl, y);
+ int ret = sysobj.rop().read(dpp, &bl, y);
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << __func__ << " failed to read converted: ret "<< ret << " " << cpp_strerror(-ret)
+ ldpp_dout(dpp, 0) << __func__ << " failed to read converted: ret "<< ret << " " << cpp_strerror(-ret)
<< dendl;
return ret;
} else if (ret != -ENOENT) {
- ldout(cct, 20) << "System already converted " << dendl;
+ ldpp_dout(dpp, 20) << "System already converted " << dendl;
return 0;
}
string default_region;
- ret = default_zonegroup.init(cct, sysobj_svc, y, false, true);
+ ret = default_zonegroup.init(dpp, cct, sysobj_svc, y, false, true);
if (ret < 0) {
- ldout(cct, 0) << __func__ << " failed init default region: ret "<< ret << " " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " failed init default region: ret "<< ret << " " << cpp_strerror(-ret) << dendl;
return ret;
}
- ret = default_zonegroup.read_default_id(default_region, y, true);
+ ret = default_zonegroup.read_default_id(dpp, default_region, y, true);
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << __func__ << " failed reading old default region: ret "<< ret << " " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " failed reading old default region: ret "<< ret << " " << cpp_strerror(-ret) << dendl;
return ret;
}
/* convert regions to zonegroups */
list<string> regions;
- ret = list_regions(regions);
+ ret = list_regions(dpp, regions);
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << __func__ << " failed to list regions: ret "<< ret << " " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " failed to list regions: ret "<< ret << " " << cpp_strerror(-ret) << dendl;
return ret;
} else if (ret == -ENOENT || regions.empty()) {
RGWZoneParams zoneparams(default_zone_name);
- int ret = zoneparams.init(cct, sysobj_svc, y);
+ int ret = zoneparams.init(dpp, cct, sysobj_svc, y);
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << __func__ << ": error initializing default zone params: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << ": error initializing default zone params: " << cpp_strerror(-ret) << dendl;
return ret;
}
/* update master zone */
RGWZoneGroup default_zg(default_zonegroup_name);
- ret = default_zg.init(cct, sysobj_svc, y);
+ ret = default_zg.init(dpp, cct, sysobj_svc, y);
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << __func__ << ": error in initializing default zonegroup: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << ": error in initializing default zonegroup: " << cpp_strerror(-ret) << dendl;
return ret;
}
if (ret != -ENOENT && default_zg.master_zone.empty()) {
default_zg.master_zone = zoneparams.get_id();
- return default_zg.update(y);
+ return default_zg.update(dpp, y);
}
return 0;
}
for (list<string>::iterator iter = regions.begin(); iter != regions.end(); ++iter) {
if (*iter != default_zonegroup_name){
RGWZoneGroup region(*iter);
- int ret = region.init(cct, sysobj_svc, y, true, true);
+ int ret = region.init(dpp, cct, sysobj_svc, y, true, true);
if (ret < 0) {
- ldout(cct, 0) << __func__ << " failed init region "<< *iter << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " failed init region "<< *iter << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
if (region.is_master_zonegroup()) {
buf_to_hex(md5, CEPH_CRYPTO_MD5_DIGESTSIZE, md5_str);
string new_realm_id(md5_str);
RGWRealm new_realm(new_realm_id,new_realm_name);
- ret = new_realm.init(cct, sysobj_svc, y, false);
+ ret = new_realm.init(dpp, cct, sysobj_svc, y, false);
if (ret < 0) {
- ldout(cct, 0) << __func__ << " Error initing new realm: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " Error initing new realm: " << cpp_strerror(-ret) << dendl;
return ret;
}
ret = new_realm.create(dpp, y);
ldpp_dout(dpp, 0) << __func__ << " Error creating new realm: " << cpp_strerror(-ret) << dendl;
return ret;
}
- ret = new_realm.set_as_default(y);
+ ret = new_realm.set_as_default(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << __func__ << " Error setting realm as default: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " Error setting realm as default: " << cpp_strerror(-ret) << dendl;
return ret;
}
- ret = realm->init(cct, sysobj_svc, y);
+ ret = realm->init(dpp, cct, sysobj_svc, y);
if (ret < 0) {
- ldout(cct, 0) << __func__ << " Error initing realm: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " Error initing realm: " << cpp_strerror(-ret) << dendl;
return ret;
}
- ret = current_period->init(cct, sysobj_svc, realm->get_id(), y,
+ ret = current_period->init(dpp, cct, sysobj_svc, realm->get_id(), y,
realm->get_name());
if (ret < 0) {
- ldout(cct, 0) << __func__ << " Error initing current period: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " Error initing current period: " << cpp_strerror(-ret) << dendl;
return ret;
}
}
/* create zonegroups */
for (iter = regions.begin(); iter != regions.end(); ++iter)
{
- ldout(cct, 0) << __func__ << " Converting " << *iter << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " Converting " << *iter << dendl;
/* check to see if we don't have already a zonegroup with this name */
RGWZoneGroup new_zonegroup(*iter);
- ret = new_zonegroup.init(cct , sysobj_svc, y);
+ ret = new_zonegroup.init(dpp, cct , sysobj_svc, y);
if (ret == 0 && new_zonegroup.get_id() != *iter) {
- ldout(cct, 0) << __func__ << " zonegroup "<< *iter << " already exists id " << new_zonegroup.get_id () <<
+ ldpp_dout(dpp, 0) << __func__ << " zonegroup "<< *iter << " already exists id " << new_zonegroup.get_id() <<
" skipping conversion " << dendl;
continue;
}
RGWZoneGroup zonegroup(*iter);
zonegroup.set_id(*iter);
- int ret = zonegroup.init(cct, sysobj_svc, y, true, true);
+ int ret = zonegroup.init(dpp, cct, sysobj_svc, y, true, true);
if (ret < 0) {
- ldout(cct, 0) << __func__ << " failed init zonegroup: ret "<< ret << " " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " failed init zonegroup: ret "<< ret << " " << cpp_strerror(-ret) << dendl;
return ret;
}
zonegroup.realm_id = realm->get_id();
/* fix default region master zone */
if (*iter == default_zonegroup_name && zonegroup.master_zone.empty()) {
- ldout(cct, 0) << __func__ << " Setting default zone as master for default region" << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " Setting default zone as master for default region" << dendl;
zonegroup.master_zone = default_zone_name;
}
- ret = zonegroup.update(y);
+ ret = zonegroup.update(dpp, y);
if (ret < 0 && ret != -EEXIST) {
- ldout(cct, 0) << __func__ << " failed to update zonegroup " << *iter << ": ret "<< ret << " " << cpp_strerror(-ret)
+ ldpp_dout(dpp, 0) << __func__ << " failed to update zonegroup " << *iter << ": ret "<< ret << " " << cpp_strerror(-ret)
<< dendl;
return ret;
}
- ret = zonegroup.update_name(y);
+ ret = zonegroup.update_name(dpp, y);
if (ret < 0 && ret != -EEXIST) {
- ldout(cct, 0) << __func__ << " failed to update_name for zonegroup " << *iter << ": ret "<< ret << " " << cpp_strerror(-ret)
+ ldpp_dout(dpp, 0) << __func__ << " failed to update_name for zonegroup " << *iter << ": ret "<< ret << " " << cpp_strerror(-ret)
<< dendl;
return ret;
}
if (zonegroup.get_name() == default_region) {
- ret = zonegroup.set_as_default(y);
+ ret = zonegroup.set_as_default(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << __func__ << " failed to set_as_default " << *iter << ": ret "<< ret << " " << cpp_strerror(-ret)
+ ldpp_dout(dpp, 0) << __func__ << " failed to set_as_default " << *iter << ": ret "<< ret << " " << cpp_strerror(-ret)
<< dendl;
return ret;
}
}
for (auto iter = zonegroup.zones.begin(); iter != zonegroup.zones.end();
++iter) {
- ldout(cct, 0) << __func__ << " Converting zone" << iter->first << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " Converting zone" << iter->first << dendl;
RGWZoneParams zoneparams(iter->first, iter->second.name);
zoneparams.set_id(iter->first.id);
zoneparams.realm_id = realm->get_id();
- ret = zoneparams.init(cct, sysobj_svc, y);
+ ret = zoneparams.init(dpp, cct, sysobj_svc, y);
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << __func__ << " failed to init zoneparams " << iter->first << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " failed to init zoneparams " << iter->first << ": " << cpp_strerror(-ret) << dendl;
return ret;
} else if (ret == -ENOENT) {
- ldout(cct, 0) << __func__ << " zone is part of another cluster " << iter->first << " skipping " << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " zone is part of another cluster " << iter->first << " skipping " << dendl;
continue;
}
zonegroup.realm_id = realm->get_id();
- ret = zoneparams.update(y);
+ ret = zoneparams.update(dpp, y);
if (ret < 0 && ret != -EEXIST) {
- ldout(cct, 0) << __func__ << " failed to update zoneparams " << iter->first << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " failed to update zoneparams " << iter->first << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
- ret = zoneparams.update_name(y);
+ ret = zoneparams.update_name(dpp, y);
if (ret < 0 && ret != -EEXIST) {
- ldout(cct, 0) << __func__ << " failed to init zoneparams " << iter->first << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " failed to init zoneparams " << iter->first << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
}
if (!current_period->get_id().empty()) {
- ret = current_period->add_zonegroup(zonegroup, y);
+ ret = current_period->add_zonegroup(dpp, zonegroup, y);
if (ret < 0) {
- ldout(cct, 0) << __func__ << " failed to add zonegroup to current_period: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " failed to add zonegroup to current_period: " << cpp_strerror(-ret) << dendl;
return ret;
}
}
}
if (!current_period->get_id().empty()) {
- ret = current_period->update(y);
+ ret = current_period->update(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << __func__ << " failed to update new period: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " failed to update new period: " << cpp_strerror(-ret) << dendl;
return ret;
}
- ret = current_period->store_info(false, y);
+ ret = current_period->store_info(dpp, false, y);
if (ret < 0) {
- ldout(cct, 0) << __func__ << " failed to store new period: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " failed to store new period: " << cpp_strerror(-ret) << dendl;
return ret;
}
- ret = current_period->reflect(y);
+ ret = current_period->reflect(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << __func__ << " failed to update local objects: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " failed to update local objects: " << cpp_strerror(-ret) << dendl;
return ret;
}
}
for (auto const& iter : regions) {
RGWZoneGroup zonegroup(iter);
- int ret = zonegroup.init(cct, sysobj_svc, y, true, true);
+ int ret = zonegroup.init(dpp, cct, sysobj_svc, y, true, true);
if (ret < 0) {
- ldout(cct, 0) << __func__ << " failed init zonegroup" << iter << ": ret "<< ret << " " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " failed init zonegroup" << iter << ": ret "<< ret << " " << cpp_strerror(-ret) << dendl;
return ret;
}
- ret = zonegroup.delete_obj(y, true);
+ ret = zonegroup.delete_obj(dpp, y, true);
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << __func__ << " failed to delete region " << iter << ": ret "<< ret << " " << cpp_strerror(-ret)
+ ldpp_dout(dpp, 0) << __func__ << " failed to delete region " << iter << ": ret "<< ret << " " << cpp_strerror(-ret)
<< dendl;
return ret;
}
/* mark as converted */
ret = sysobj.wop()
.set_exclusive(true)
- .write(bl, y);
+ .write(dpp, bl, y);
if (ret < 0 ) {
- ldout(cct, 0) << __func__ << " failed to mark cluster as converted: ret "<< ret << " " << cpp_strerror(-ret)
+ ldpp_dout(dpp, 0) << __func__ << " failed to mark cluster as converted: ret "<< ret << " " << cpp_strerror(-ret)
<< dendl;
return ret;
}
zonegroup_conn_map[zonegroup.get_id()] = new_connection;
}
-int RGWSI_Zone::init_zg_from_period(bool *initialized, optional_yield y)
+int RGWSI_Zone::init_zg_from_period(const DoutPrefixProvider *dpp, bool *initialized, optional_yield y)
{
*initialized = false;
return 0;
}
- int ret = zonegroup->init(cct, sysobj_svc, y);
- ldout(cct, 20) << "period zonegroup init ret " << ret << dendl;
+ int ret = zonegroup->init(dpp, cct, sysobj_svc, y);
+ ldpp_dout(dpp, 20) << "period zonegroup init ret " << ret << dendl;
if (ret == -ENOENT) {
return 0;
}
if (ret < 0) {
- ldout(cct, 0) << "failed reading zonegroup info: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "failed reading zonegroup info: " << cpp_strerror(-ret) << dendl;
return ret;
}
- ldout(cct, 20) << "period zonegroup name " << zonegroup->get_name() << dendl;
+ ldpp_dout(dpp, 20) << "period zonegroup name " << zonegroup->get_name() << dendl;
map<string, RGWZoneGroup>::const_iterator iter =
current_period->get_map().zonegroups.find(zonegroup->get_id());
if (iter != current_period->get_map().zonegroups.end()) {
- ldout(cct, 20) << "using current period zonegroup " << zonegroup->get_name() << dendl;
+ ldpp_dout(dpp, 20) << "using current period zonegroup " << zonegroup->get_name() << dendl;
*zonegroup = iter->second;
- ret = zonegroup->init(cct, sysobj_svc, y, false);
+ ret = zonegroup->init(dpp, cct, sysobj_svc, y, false);
if (ret < 0) {
- ldout(cct, 0) << "failed init zonegroup: " << " " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "failed init zonegroup: " << " " << cpp_strerror(-ret) << dendl;
return ret;
}
- ret = zone_params->init(cct, sysobj_svc, y);
+ ret = zone_params->init(dpp, cct, sysobj_svc, y);
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << "failed reading zone params info: " << " " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "failed reading zone params info: " << " " << cpp_strerror(-ret) << dendl;
return ret;
} if (ret ==-ENOENT && zonegroup->get_name() == default_zonegroup_name) {
- ldout(cct, 10) << " Using default name "<< default_zone_name << dendl;
+ ldpp_dout(dpp, 10) << " Using default name "<< default_zone_name << dendl;
zone_params->set_name(default_zone_name);
- ret = zone_params->init(cct, sysobj_svc, y);
+ ret = zone_params->init(dpp, cct, sysobj_svc, y);
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << "failed reading zone params info: " << " " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "failed reading zone params info: " << " " << cpp_strerror(-ret) << dendl;
return ret;
}
}
// fix missing master zone for a single zone zonegroup
if (zg.master_zone.empty() && zg.zones.size() == 1) {
master = zg.zones.begin();
- ldout(cct, 0) << "zonegroup " << zg.get_name() << " missing master_zone, setting zone " <<
+ ldpp_dout(dpp, 0) << "zonegroup " << zg.get_name() << " missing master_zone, setting zone " <<
master->second.name << " id:" << master->second.id << " as master" << dendl;
if (zonegroup->get_id() == zg.get_id()) {
zonegroup->master_zone = master->second.id;
- ret = zonegroup->update(y);
+ ret = zonegroup->update(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << "error updating zonegroup : " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "error updating zonegroup : " << cpp_strerror(-ret) << dendl;
return ret;
}
} else {
RGWZoneGroup fixed_zg(zg.get_id(),zg.get_name());
- ret = fixed_zg.init(cct, sysobj_svc, y);
+ ret = fixed_zg.init(dpp, cct, sysobj_svc, y);
if (ret < 0) {
- ldout(cct, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl;
return ret;
}
fixed_zg.master_zone = master->second.id;
- ret = fixed_zg.update(y);
+ ret = fixed_zg.update(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl;
return ret;
}
}
} else {
- ldout(cct, 0) << "zonegroup " << zg.get_name() << " missing zone for master_zone=" <<
+ ldpp_dout(dpp, 0) << "zonegroup " << zg.get_name() << " missing zone for master_zone=" <<
zg.master_zone << dendl;
return -EINVAL;
}
int RGWSI_Zone::init_zg_from_local(const DoutPrefixProvider *dpp, bool *creating_defaults, optional_yield y)
{
- int ret = zonegroup->init(cct, sysobj_svc, y);
+ int ret = zonegroup->init(dpp, cct, sysobj_svc, y);
if ( (ret < 0 && ret != -ENOENT) || (ret == -ENOENT && !cct->_conf->rgw_zonegroup.empty())) {
- ldout(cct, 0) << "failed reading zonegroup info: ret "<< ret << " " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "failed reading zonegroup info: ret "<< ret << " " << cpp_strerror(-ret) << dendl;
return ret;
} else if (ret == -ENOENT) {
*creating_defaults = true;
- ldout(cct, 10) << "Creating default zonegroup " << dendl;
+ ldpp_dout(dpp, 10) << "Creating default zonegroup " << dendl;
ret = zonegroup->create_default(dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "failure in zonegroup create_default: ret "<< ret << " " << cpp_strerror(-ret)
<< dendl;
return ret;
}
- ret = zonegroup->init(cct, sysobj_svc, y);
+ ret = zonegroup->init(dpp, cct, sysobj_svc, y);
if (ret < 0) {
- ldout(cct, 0) << "failure in zonegroup create_default: ret "<< ret << " " << cpp_strerror(-ret)
+ ldpp_dout(dpp, 0) << "failure in zonegroup create_default: ret "<< ret << " " << cpp_strerror(-ret)
<< dendl;
return ret;
}
// fix missing master zone for a single zone zonegroup
if (zonegroup->master_zone.empty() && zonegroup->zones.size() == 1) {
master = zonegroup->zones.begin();
- ldout(cct, 0) << "zonegroup " << zonegroup->get_name() << " missing master_zone, setting zone " <<
+ ldpp_dout(dpp, 0) << "zonegroup " << zonegroup->get_name() << " missing master_zone, setting zone " <<
master->second.name << " id:" << master->second.id << " as master" << dendl;
zonegroup->master_zone = master->second.id;
- ret = zonegroup->update(y);
+ ret = zonegroup->update(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl;
return ret;
}
} else {
- ldout(cct, 0) << "zonegroup " << zonegroup->get_name() << " missing zone for "
+ ldpp_dout(dpp, 0) << "zonegroup " << zonegroup->get_name() << " missing zone for "
"master_zone=" << zonegroup->master_zone << dendl;
return -EINVAL;
}
RGWSysObjectCtx obj_ctx = sysobj_svc->init_obj_ctx();
RGWSysObj sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid));
- int ret = sysobj.rop().read(&bl, y);
+ int ret = sysobj.rop().read(dpp, &bl, y);
if (ret < 0 && ret != -ENOENT) {
return ret;
} else if (ret == -ENOENT) {
auto iter = bl.cbegin();
decode(zonegroupmap, iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "error decoding regionmap from " << pool << ":" << oid << dendl;
+ ldpp_dout(dpp, 0) << "error decoding regionmap from " << pool << ":" << oid << dendl;
return -EIO;
}
for (map<string, RGWZoneGroup>::iterator iter = zonegroupmap.zonegroups.begin();
iter != zonegroupmap.zonegroups.end(); ++iter) {
RGWZoneGroup& zonegroup = iter->second;
- ret = zonegroup.init(cct, sysobj_svc, y, false);
- ret = zonegroup.update(y);
+ ret = zonegroup.init(dpp, cct, sysobj_svc, y, false);
+ ret = zonegroup.update(dpp, y);
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << "Error could not update zonegroup " << zonegroup.get_name() << ": " <<
+ ldpp_dout(dpp, 0) << "Error could not update zonegroup " << zonegroup.get_name() << ": " <<
cpp_strerror(-ret) << dendl;
return ret;
} else if (ret == -ENOENT) {
current_period->set_bucket_quota(zonegroupmap.bucket_quota);
// remove the region_map so we don't try to convert again
- ret = sysobj.wop().remove(y);
+ ret = sysobj.wop().remove(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << "Error could not remove " << sysobj.get_obj()
+ ldpp_dout(dpp, 0) << "Error could not remove " << sysobj.get_obj()
<< " after upgrading to zonegroup map: " << cpp_strerror(ret) << dendl;
return ret;
}
}
-int RGWSI_Zone::select_new_bucket_location(const RGWUserInfo& user_info, const string& zonegroup_id,
+int RGWSI_Zone::select_new_bucket_location(const DoutPrefixProvider *dpp, const RGWUserInfo& user_info, const string& zonegroup_id,
const rgw_placement_rule& request_rule,
rgw_placement_rule *pselected_rule_name, RGWZonePlacementInfo *rule_info,
optional_yield y)
RGWZoneGroup zonegroup;
int ret = get_zonegroup(zonegroup_id, zonegroup);
if (ret < 0) {
- ldout(cct, 0) << "could not find zonegroup " << zonegroup_id << " in current period" << dendl;
+ ldpp_dout(dpp, 0) << "could not find zonegroup " << zonegroup_id << " in current period" << dendl;
return ret;
}
used_rule = &request_rule;
titer = zonegroup.placement_targets.find(request_rule.name);
if (titer == zonegroup.placement_targets.end()) {
- ldout(cct, 0) << "could not find requested placement id " << request_rule
+ ldpp_dout(dpp, 0) << "could not find requested placement id " << request_rule
<< " within zonegroup " << dendl;
return -ERR_INVALID_LOCATION_CONSTRAINT;
}
used_rule = &user_info.default_placement;
titer = zonegroup.placement_targets.find(user_info.default_placement.name);
if (titer == zonegroup.placement_targets.end()) {
- ldout(cct, 0) << "could not find user default placement id " << user_info.default_placement
+ ldpp_dout(dpp, 0) << "could not find user default placement id " << user_info.default_placement
<< " within zonegroup " << dendl;
return -ERR_INVALID_LOCATION_CONSTRAINT;
}
} else {
if (zonegroup.default_placement.name.empty()) { // zonegroup default rule as fallback, it should not be empty.
- ldout(cct, 0) << "misconfiguration, zonegroup default placement id should not be empty." << dendl;
+ ldpp_dout(dpp, 0) << "misconfiguration, zonegroup default placement id should not be empty." << dendl;
return -ERR_ZONEGROUP_DEFAULT_PLACEMENT_MISCONFIGURATION;
} else {
used_rule = &zonegroup.default_placement;
titer = zonegroup.placement_targets.find(zonegroup.default_placement.name);
if (titer == zonegroup.placement_targets.end()) {
- ldout(cct, 0) << "could not find zonegroup default placement id " << zonegroup.default_placement
+ ldpp_dout(dpp, 0) << "could not find zonegroup default placement id " << zonegroup.default_placement
<< " within zonegroup " << dendl;
return -ERR_INVALID_LOCATION_CONSTRAINT;
}
/* now check tag for the rule, whether user is permitted to use rule */
const auto& target_rule = titer->second;
if (!target_rule.user_permitted(user_info.placement_tags)) {
- ldout(cct, 0) << "user not permitted to use placement rule " << titer->first << dendl;
+ ldpp_dout(dpp, 0) << "user not permitted to use placement rule " << titer->first << dendl;
return -EPERM;
}
*pselected_rule_name = rule;
}
- return select_bucket_location_by_rule(rule, rule_info, y);
+ return select_bucket_location_by_rule(dpp, rule, rule_info, y);
}
-int RGWSI_Zone::select_bucket_location_by_rule(const rgw_placement_rule& location_rule, RGWZonePlacementInfo *rule_info, optional_yield y)
+int RGWSI_Zone::select_bucket_location_by_rule(const DoutPrefixProvider *dpp, const rgw_placement_rule& location_rule, RGWZonePlacementInfo *rule_info, optional_yield y)
{
if (location_rule.name.empty()) {
/* we can only reach here if we're trying to set a bucket location from a bucket
* created on a different zone, using a legacy / default pool configuration
*/
if (rule_info) {
- return select_legacy_bucket_placement(rule_info, y);
+ return select_legacy_bucket_placement(dpp, rule_info, y);
}
return 0;
auto piter = zone_params->placement_pools.find(location_rule.name);
if (piter == zone_params->placement_pools.end()) {
/* couldn't find, means we cannot really place data for this bucket in this zone */
- ldout(cct, 0) << "ERROR: This zone does not contain placement rule "
+ ldpp_dout(dpp, 0) << "ERROR: This zone does not contain placement rule "
<< location_rule << " present in the zonegroup!" << dendl;
return -EINVAL;
}
auto storage_class = location_rule.get_storage_class();
if (!piter->second.storage_class_exists(storage_class)) {
- ldout(cct, 5) << "requested storage class does not exist: " << storage_class << dendl;
+ ldpp_dout(dpp, 5) << "requested storage class does not exist: " << storage_class << dendl;
return -EINVAL;
}
return 0;
}
-int RGWSI_Zone::select_bucket_placement(const RGWUserInfo& user_info, const string& zonegroup_id,
+int RGWSI_Zone::select_bucket_placement(const DoutPrefixProvider *dpp, const RGWUserInfo& user_info, const string& zonegroup_id,
const rgw_placement_rule& placement_rule,
rgw_placement_rule *pselected_rule, RGWZonePlacementInfo *rule_info,
optional_yield y)
{
if (!zone_params->placement_pools.empty()) {
- return select_new_bucket_location(user_info, zonegroup_id, placement_rule,
+ return select_new_bucket_location(dpp, user_info, zonegroup_id, placement_rule,
pselected_rule, rule_info, y);
}
}
if (rule_info) {
- return select_legacy_bucket_placement(rule_info, y);
+ return select_legacy_bucket_placement(dpp, rule_info, y);
}
return 0;
}
-int RGWSI_Zone::select_legacy_bucket_placement(RGWZonePlacementInfo *rule_info,
+int RGWSI_Zone::select_legacy_bucket_placement(const DoutPrefixProvider *dpp, RGWZonePlacementInfo *rule_info,
optional_yield y)
{
bufferlist map_bl;
auto obj_ctx = sysobj_svc->init_obj_ctx();
auto sysobj = obj_ctx.get_obj(obj);
- int ret = sysobj.rop().read(&map_bl, y);
+ int ret = sysobj.rop().read(dpp, &map_bl, y);
if (ret < 0) {
goto read_omap;
}
auto iter = map_bl.cbegin();
decode(m, iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: couldn't decode avail_pools" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: couldn't decode avail_pools" << dendl;
}
read_omap:
if (m.empty()) {
- ret = sysobj.omap().get_all(&m, y);
+ ret = sysobj.omap().get_all(dpp, &m, y);
write_map = true;
}
ret = rados_svc->pool().create(pools, &retcodes);
if (ret < 0)
return ret;
- ret = sysobj.omap().set(s, bl, y);
+ ret = sysobj.omap().set(dpp, s, bl, y);
if (ret < 0)
return ret;
m[s] = bl;
if (write_map) {
bufferlist new_bl;
encode(m, new_bl);
- ret = sysobj.wop().write(new_bl, y);
+ ret = sysobj.wop().write(dpp, new_bl, y);
if (ret < 0) {
- ldout(cct, 0) << "WARNING: could not save avail pools map info ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: could not save avail pools map info ret=" << ret << dendl;
}
}
return 0;
}
-int RGWSI_Zone::update_placement_map(optional_yield y)
+int RGWSI_Zone::update_placement_map(const DoutPrefixProvider *dpp, optional_yield y)
{
bufferlist header;
map<string, bufferlist> m;
auto obj_ctx = sysobj_svc->init_obj_ctx();
auto sysobj = obj_ctx.get_obj(obj);
- int ret = sysobj.omap().get_all(&m, y);
+ int ret = sysobj.omap().get_all(dpp, &m, y);
if (ret < 0)
return ret;
bufferlist new_bl;
encode(m, new_bl);
- ret = sysobj.wop().write(new_bl, y);
+ ret = sysobj.wop().write(dpp, new_bl, y);
if (ret < 0) {
- ldout(cct, 0) << "WARNING: could not save avail pools map info ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: could not save avail pools map info ret=" << ret << dendl;
}
return ret;
}
-int RGWSI_Zone::add_bucket_placement(const rgw_pool& new_pool, optional_yield y)
+int RGWSI_Zone::add_bucket_placement(const DoutPrefixProvider *dpp, const rgw_pool& new_pool, optional_yield y)
{
int ret = rados_svc->pool(new_pool).lookup();
if (ret < 0) { // DNE, or something
auto sysobj = obj_ctx.get_obj(obj);
bufferlist empty_bl;
- ret = sysobj.omap().set(new_pool.to_str(), empty_bl, y);
+ ret = sysobj.omap().set(dpp, new_pool.to_str(), empty_bl, y);
// don't care about return value
- update_placement_map(y);
+ update_placement_map(dpp, y);
return ret;
}
-int RGWSI_Zone::remove_bucket_placement(const rgw_pool& old_pool, optional_yield y)
+int RGWSI_Zone::remove_bucket_placement(const DoutPrefixProvider *dpp, const rgw_pool& old_pool, optional_yield y)
{
rgw_raw_obj obj(zone_params->domain_root, avail_pools);
auto obj_ctx = sysobj_svc->init_obj_ctx();
auto sysobj = obj_ctx.get_obj(obj);
- int ret = sysobj.omap().del(old_pool.to_str(), y);
+ int ret = sysobj.omap().del(dpp, old_pool.to_str(), y);
// don't care about return value
- update_placement_map(y);
+ update_placement_map(dpp, y);
return ret;
}
-int RGWSI_Zone::list_placement_set(set<rgw_pool>& names, optional_yield y)
+int RGWSI_Zone::list_placement_set(const DoutPrefixProvider *dpp, set<rgw_pool>& names, optional_yield y)
{
bufferlist header;
map<string, bufferlist> m;
rgw_raw_obj obj(zone_params->domain_root, avail_pools);
auto obj_ctx = sysobj_svc->init_obj_ctx();
auto sysobj = obj_ctx.get_obj(obj);
- int ret = sysobj.omap().get_all(&m, y);
+ int ret = sysobj.omap().get_all(dpp, &m, y);
if (ret < 0)
return ret;
void shutdown() override;
int replace_region_with_zonegroup(const DoutPrefixProvider *dpp, optional_yield y);
- int init_zg_from_period(bool *initialized, optional_yield y);
+ int init_zg_from_period(const DoutPrefixProvider *dpp, bool *initialized, optional_yield y);
int init_zg_from_local(const DoutPrefixProvider *dpp, bool *creating_defaults, optional_yield y);
int convert_regionmap(const DoutPrefixProvider *dpp, optional_yield y);
- int update_placement_map(optional_yield y);
+ int update_placement_map(const DoutPrefixProvider *dpp, optional_yield y);
public:
RGWSI_Zone(CephContext *cct);
~RGWSI_Zone();
RGWRESTConn *get_zone_conn_by_name(const string& name);
bool find_zone_id_by_name(const string& name, rgw_zone_id *id);
- int select_bucket_placement(const RGWUserInfo& user_info, const string& zonegroup_id,
+ int select_bucket_placement(const DoutPrefixProvider *dpp, const RGWUserInfo& user_info, const string& zonegroup_id,
const rgw_placement_rule& rule,
rgw_placement_rule *pselected_rule, RGWZonePlacementInfo *rule_info, optional_yield y);
- int select_legacy_bucket_placement(RGWZonePlacementInfo *rule_info, optional_yield y);
- int select_new_bucket_location(const RGWUserInfo& user_info, const string& zonegroup_id,
+ int select_legacy_bucket_placement(const DoutPrefixProvider *dpp, RGWZonePlacementInfo *rule_info, optional_yield y);
+ int select_new_bucket_location(const DoutPrefixProvider *dpp, const RGWUserInfo& user_info, const string& zonegroup_id,
const rgw_placement_rule& rule,
rgw_placement_rule *pselected_rule_name, RGWZonePlacementInfo *rule_info,
optional_yield y);
- int select_bucket_location_by_rule(const rgw_placement_rule& location_rule, RGWZonePlacementInfo *rule_info, optional_yield y);
+ int select_bucket_location_by_rule(const DoutPrefixProvider *dpp, const rgw_placement_rule& location_rule, RGWZonePlacementInfo *rule_info, optional_yield y);
- int add_bucket_placement(const rgw_pool& new_pool, optional_yield y);
- int remove_bucket_placement(const rgw_pool& old_pool, optional_yield y);
- int list_placement_set(set<rgw_pool>& names, optional_yield y);
+ int add_bucket_placement(const DoutPrefixProvider *dpp, const rgw_pool& new_pool, optional_yield y);
+ int remove_bucket_placement(const DoutPrefixProvider *dpp, const rgw_pool& old_pool, optional_yield y);
+ int list_placement_set(const DoutPrefixProvider *dpp, set<rgw_pool>& names, optional_yield y);
bool is_meta_master() const;
bool can_reshard() const;
bool is_syncing_bucket_meta(const rgw_bucket& bucket);
- int list_zonegroups(list<string>& zonegroups);
- int list_regions(list<string>& regions);
- int list_zones(list<string>& zones);
- int list_realms(list<string>& realms);
- int list_periods(list<string>& periods);
- int list_periods(const string& current_period, list<string>& periods, optional_yield y);
+ int list_zonegroups(const DoutPrefixProvider *dpp, list<string>& zonegroups);
+ int list_regions(const DoutPrefixProvider *dpp, list<string>& regions);
+ int list_zones(const DoutPrefixProvider *dpp, list<string>& zones);
+ int list_realms(const DoutPrefixProvider *dpp, list<string>& realms);
+ int list_periods(const DoutPrefixProvider *dpp, list<string>& periods);
+ int list_periods(const DoutPrefixProvider *dpp, const string& current_period, list<string>& periods, optional_yield y);
};
#include "include/scope_guard.h"
#include "include/types.h"
#include "include/rados/librados.hpp"
+#include "common/ceph_context.h"
#include "cls/fifo/cls_fifo_ops.h"
#include "test/librados/test_cxx.h"
namespace fifo = rados::cls::fifo;
namespace RCf = rgw::cls::fifo;
+auto cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT);
+const DoutPrefix dp(cct, 1, "test legacy cls fifo: ");
+
namespace {
-int fifo_create(R::IoCtx& ioctx,
+int fifo_create(const DoutPrefixProvider *dpp, R::IoCtx& ioctx,
const std::string& oid,
std::string_view id,
optional_yield y,
R::ObjectWriteOperation op;
RCf::create_meta(&op, id, objv, oid_prefix, exclusive, max_part_size,
max_entry_size);
- return rgw_rados_operate(ioctx, oid, &op, y);
+ return rgw_rados_operate(dpp, ioctx, oid, &op, y);
}
}
TEST_F(LegacyClsFIFO, TestCreate)
{
- auto r = fifo_create(ioctx, fifo_id, ""s, null_yield);
+ auto r = fifo_create(&dp, ioctx, fifo_id, ""s, null_yield);
EXPECT_EQ(-EINVAL, r);
- r = fifo_create(ioctx, fifo_id, fifo_id, null_yield, std::nullopt,
+ r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield, std::nullopt,
std::nullopt, false, 0);
EXPECT_EQ(-EINVAL, r);
- r = fifo_create(ioctx, fifo_id, {}, null_yield,
+ r = fifo_create(&dp, ioctx, fifo_id, {}, null_yield,
std::nullopt, std::nullopt,
false, RCf::default_max_part_size, 0);
EXPECT_EQ(-EINVAL, r);
- r = fifo_create(ioctx, fifo_id, fifo_id, null_yield);
+ r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield);
EXPECT_EQ(0, r);
std::uint64_t size;
ioctx.stat(fifo_id, &size, nullptr);
EXPECT_GT(size, 0);
/* test idempotency */
- r = fifo_create(ioctx, fifo_id, fifo_id, null_yield);
+ r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield);
EXPECT_EQ(0, r);
- r = fifo_create(ioctx, fifo_id, {}, null_yield, std::nullopt,
+ r = fifo_create(&dp, ioctx, fifo_id, {}, null_yield, std::nullopt,
std::nullopt, false);
EXPECT_EQ(-EINVAL, r);
- r = fifo_create(ioctx, fifo_id, {}, null_yield, std::nullopt,
+ r = fifo_create(&dp, ioctx, fifo_id, {}, null_yield, std::nullopt,
"myprefix"sv, false);
EXPECT_EQ(-EINVAL, r);
- r = fifo_create(ioctx, fifo_id, "foo"sv, null_yield,
+ r = fifo_create(&dp, ioctx, fifo_id, "foo"sv, null_yield,
std::nullopt, std::nullopt, false);
EXPECT_EQ(-EEXIST, r);
}
TEST_F(LegacyClsFIFO, TestGetInfo)
{
- auto r = fifo_create(ioctx, fifo_id, fifo_id, null_yield);
+ auto r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield);
fifo::info info;
std::uint32_t part_header_size;
std::uint32_t part_entry_overhead;
- r = RCf::get_meta(ioctx, fifo_id, std::nullopt, &info, &part_header_size,
+ r = RCf::get_meta(&dp, ioctx, fifo_id, std::nullopt, &info, &part_header_size,
&part_entry_overhead, 0, null_yield);
EXPECT_EQ(0, r);
EXPECT_GT(part_header_size, 0);
EXPECT_GT(part_entry_overhead, 0);
EXPECT_FALSE(info.version.instance.empty());
- r = RCf::get_meta(ioctx, fifo_id, info.version, &info, &part_header_size,
+ r = RCf::get_meta(&dp, ioctx, fifo_id, info.version, &info, &part_header_size,
&part_entry_overhead, 0, null_yield);
EXPECT_EQ(0, r);
fifo::objv objv;
objv.instance = "foo";
objv.ver = 12;
- r = RCf::get_meta(ioctx, fifo_id, objv, &info, &part_header_size,
+ r = RCf::get_meta(&dp, ioctx, fifo_id, objv, &info, &part_header_size,
&part_entry_overhead, 0, null_yield);
EXPECT_EQ(-ECANCELED, r);
}
TEST_F(LegacyFIFO, TestOpenDefault)
{
std::unique_ptr<RCf::FIFO> fifo;
- auto r = RCf::FIFO::create(ioctx, fifo_id, &fifo, null_yield);
+ auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &fifo, null_yield);
ASSERT_EQ(0, r);
// force reading from backend
- r = fifo->read_meta(null_yield);
+ r = fifo->read_meta(&dp, null_yield);
EXPECT_EQ(0, r);
auto info = fifo->meta();
EXPECT_EQ(info.id, fifo_id);
/* first successful create */
std::unique_ptr<RCf::FIFO> f;
- auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, objv, oid_prefix,
+ auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, objv, oid_prefix,
false, max_part_size, max_entry_size);
ASSERT_EQ(0, r);
/* force reading from backend */
- r = f->read_meta(null_yield);
+ r = f->read_meta(&dp, null_yield);
auto info = f->meta();
EXPECT_EQ(info.id, fifo_id);
EXPECT_EQ(info.params.max_part_size, max_part_size);
TEST_F(LegacyFIFO, TestPushListTrim)
{
std::unique_ptr<RCf::FIFO> f;
- auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield);
+ auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield);
ASSERT_EQ(0, r);
static constexpr auto max_entries = 10u;
for (uint32_t i = 0; i < max_entries; ++i) {
cb::list bl;
encode(i, bl);
- r = f->push(bl, null_yield);
+ r = f->push(&dp, bl, null_yield);
ASSERT_EQ(0, r);
}
bool more = false;
for (auto i = 0u; i < max_entries; ++i) {
- r = f->list(1, marker, &result, &more, null_yield);
+ r = f->list(&dp, 1, marker, &result, &more, null_yield);
ASSERT_EQ(0, r);
bool expected_more = (i != (max_entries - 1));
/* get all entries at once */
std::string markers[max_entries];
std::uint32_t min_entry = 0;
- r = f->list(max_entries * 10, std::nullopt, &result, &more, null_yield);
+ r = f->list(&dp, max_entries * 10, std::nullopt, &result, &more, null_yield);
ASSERT_EQ(0, r);
ASSERT_FALSE(more);
}
/* trim one entry */
- r = f->trim(markers[min_entry], false, null_yield);
+ r = f->trim(&dp, markers[min_entry], false, null_yield);
ASSERT_EQ(0, r);
++min_entry;
- r = f->list(max_entries * 10, std::nullopt, &result, &more, null_yield);
+ r = f->list(&dp, max_entries * 10, std::nullopt, &result, &more, null_yield);
ASSERT_EQ(0, r);
ASSERT_FALSE(more);
ASSERT_EQ(max_entries - min_entry, result.size());
static constexpr auto max_entry_size = 128ull;
std::unique_ptr<RCf::FIFO> f;
- auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt,
+ auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt,
std::nullopt, false, max_part_size, max_entry_size);
ASSERT_EQ(0, r);
cb::list bl;
bl.append(buf, sizeof(buf));
- r = f->push(bl, null_yield);
+ r = f->push(&dp, bl, null_yield);
EXPECT_EQ(-E2BIG, r);
}
static constexpr auto max_part_size = 2048ull;
static constexpr auto max_entry_size = 128ull;
std::unique_ptr<RCf::FIFO> f;
- auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt,
+ auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt,
std::nullopt, false, max_part_size,
max_entry_size);
ASSERT_EQ(0, r);
cb::list bl;
*(int *)buf = i;
bl.append(buf, sizeof(buf));
- r = f->push(bl, null_yield);
+ r = f->push(&dp, bl, null_yield);
ASSERT_EQ(0, r);
}
/* list all at once */
std::vector<RCf::list_entry> result;
bool more = false;
- r = f->list(max_entries, std::nullopt, &result, &more, null_yield);
+ r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield);
ASSERT_EQ(0, r);
EXPECT_EQ(false, more);
ASSERT_EQ(max_entries, result.size());
/* get entries one by one */
for (auto i = 0u; i < max_entries; ++i) {
- r = f->list(1, marker, &result, &more, null_yield);
+ r = f->list(&dp, 1, marker, &result, &more, null_yield);
ASSERT_EQ(0, r);
ASSERT_EQ(result.size(), 1);
const bool expected_more = (i != (max_entries - 1));
marker.reset();
for (auto i = 0u; i < max_entries; ++i) {
/* read single entry */
- r = f->list(1, marker, &result, &more, null_yield);
+ r = f->list(&dp, 1, marker, &result, &more, null_yield);
ASSERT_EQ(0, r);
ASSERT_EQ(result.size(), 1);
const bool expected_more = (i != (max_entries - 1));
ASSERT_EQ(expected_more, more);
marker = result.front().marker;
- r = f->trim(*marker, false, null_yield);
+ r = f->trim(&dp, *marker, false, null_yield);
ASSERT_EQ(0, r);
/* check tail */
ASSERT_EQ(info.tail_part_num, i / entries_per_part);
/* try to read all again, see how many entries left */
- r = f->list(max_entries, marker, &result, &more, null_yield);
+ r = f->list(&dp, max_entries, marker, &result, &more, null_yield);
ASSERT_EQ(max_entries - i - 1, result.size());
ASSERT_EQ(false, more);
}
RCf::part_info partinfo;
/* check old tails are removed */
for (auto i = 0; i < info.tail_part_num; ++i) {
- r = f->get_part_info(i, &partinfo, null_yield);
+ r = f->get_part_info(&dp, i, &partinfo, null_yield);
ASSERT_EQ(-ENOENT, r);
}
/* check current tail exists */
- r = f->get_part_info(info.tail_part_num, &partinfo, null_yield);
+ r = f->get_part_info(&dp, info.tail_part_num, &partinfo, null_yield);
ASSERT_EQ(0, r);
}
static constexpr auto max_entry_size = 128ull;
std::unique_ptr<RCf::FIFO> f;
- auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt,
+ auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt,
std::nullopt, false, max_part_size,
max_entry_size);
ASSERT_EQ(0, r);
(max_entry_size + part_entry_overhead));
const auto max_entries = entries_per_part * 4 + 1;
std::unique_ptr<RCf::FIFO> f2;
- r = RCf::FIFO::open(ioctx, fifo_id, &f2, null_yield);
+ r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield);
std::vector fifos{&f, &f2};
for (auto i = 0u; i < max_entries; ++i) {
*(int *)buf = i;
bl.append(buf, sizeof(buf));
auto& f = *fifos[i % fifos.size()];
- r = f->push(bl, null_yield);
+ r = f->push(&dp, bl, null_yield);
ASSERT_EQ(0, r);
}
/* list all by both */
std::vector<RCf::list_entry> result;
bool more = false;
- r = f2->list(max_entries, std::nullopt, &result, &more, null_yield);
+ r = f2->list(&dp, max_entries, std::nullopt, &result, &more, null_yield);
ASSERT_EQ(0, r);
ASSERT_EQ(false, more);
ASSERT_EQ(max_entries, result.size());
- r = f2->list(max_entries, std::nullopt, &result, &more, null_yield);
+ r = f2->list(&dp, max_entries, std::nullopt, &result, &more, null_yield);
ASSERT_EQ(0, r);
ASSERT_EQ(false, more);
ASSERT_EQ(max_entries, result.size());
static constexpr auto max_part_size = 2048ull;
static constexpr auto max_entry_size = 128ull;
std::unique_ptr<RCf::FIFO> f1;
- auto r = RCf::FIFO::create(ioctx, fifo_id, &f1, null_yield, std::nullopt,
+ auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f1, null_yield, std::nullopt,
std::nullopt, false, max_part_size,
max_entry_size);
ASSERT_EQ(0, r);
const auto max_entries = entries_per_part * 4 + 1;
std::unique_ptr<RCf::FIFO> f2;
- r = RCf::FIFO::open(ioctx, fifo_id, &f2, null_yield);
+ r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield);
ASSERT_EQ(0, r);
/* push one entry to f2 and the rest to f1 */
*(int *)buf = i;
bl.append(buf, sizeof(buf));
auto& f = (i < 1 ? f2 : f1);
- r = f->push(bl, null_yield);
+ r = f->push(&dp, bl, null_yield);
ASSERT_EQ(0, r);
}
std::string marker;
std::vector<RCf::list_entry> result;
bool more = false;
- r = f1->list(num, std::nullopt, &result, &more, null_yield);
+ r = f1->list(&dp, num, std::nullopt, &result, &more, null_yield);
ASSERT_EQ(0, r);
ASSERT_EQ(true, more);
ASSERT_EQ(num, result.size());
auto& entry = result[num - 1];
marker = entry.marker;
- r = f1->trim(marker, false, null_yield);
+ r = f1->trim(&dp, marker, false, null_yield);
/* list what's left by fifo2 */
const auto left = max_entries - num;
- f2->list(left, marker, &result, &more, null_yield);
+ f2->list(&dp, left, marker, &result, &more, null_yield);
ASSERT_EQ(left, result.size());
ASSERT_EQ(false, more);
static constexpr auto max_entry_size = 128ull;
std::unique_ptr<RCf::FIFO> f;
- auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt,
+ auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt,
std::nullopt, false, max_part_size,
max_entry_size);
ASSERT_EQ(0, r);
}
ASSERT_EQ(max_entries, bufs.size());
- r = f->push(bufs, null_yield);
+ r = f->push(&dp, bufs, null_yield);
ASSERT_EQ(0, r);
/* list all */
std::vector<RCf::list_entry> result;
bool more = false;
- r = f->list(max_entries, std::nullopt, &result, &more, null_yield);
+ r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield);
ASSERT_EQ(0, r);
ASSERT_EQ(false, more);
ASSERT_EQ(max_entries, result.size());
static constexpr auto max_part_size = 2048ull;
static constexpr auto max_entry_size = 128ull;
std::unique_ptr<RCf::FIFO> f;
- auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt,
+ auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt,
std::nullopt, false, max_part_size,
max_entry_size);
ASSERT_EQ(0, r);
}
ASSERT_EQ(max_entries, bufs.size());
- r = f->push(bufs, null_yield);
+ r = f->push(&dp, bufs, null_yield);
ASSERT_EQ(0, r);
auto info = f->meta();
/* list all at once */
std::vector<RCf::list_entry> result;
bool more = false;
- r = f->list(max_entries, std::nullopt, &result, &more, null_yield);
+ r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield);
ASSERT_EQ(0, r);
ASSERT_EQ(false, more);
ASSERT_EQ(max_entries, result.size());
marker.reset();
for (auto i = 0u; i < max_entries; ++i) {
/* read single entry */
- r = f->list(1, marker, &result, &more, null_yield);
+ r = f->list(&dp, 1, marker, &result, &more, null_yield);
ASSERT_EQ(0, r);
ASSERT_EQ(result.size(), 1);
const bool expected_more = (i != (max_entries - 1));
marker = result.front().marker;
std::unique_ptr<R::AioCompletion> c(rados.aio_create_completion(nullptr,
nullptr));
- f->trim(*marker, false, c.get());
+ f->trim(&dp, *marker, false, c.get());
c->wait_for_complete();
r = c->get_return_value();
ASSERT_EQ(0, r);
ASSERT_EQ(info.tail_part_num, i / entries_per_part);
/* try to read all again, see how many entries left */
- r = f->list(max_entries, marker, &result, &more, null_yield);
+ r = f->list(&dp, max_entries, marker, &result, &more, null_yield);
ASSERT_EQ(max_entries - i - 1, result.size());
ASSERT_EQ(false, more);
}
RCf::part_info partinfo;
/* check old tails are removed */
for (auto i = 0; i < info.tail_part_num; ++i) {
- r = f->get_part_info(i, &partinfo, null_yield);
+ r = f->get_part_info(&dp, i, &partinfo, null_yield);
ASSERT_EQ(-ENOENT, r);
}
/* check current tail exists */
- r = f->get_part_info(info.tail_part_num, &partinfo, null_yield);
+ r = f->get_part_info(&dp, info.tail_part_num, &partinfo, null_yield);
ASSERT_EQ(0, r);
}
TEST_F(LegacyFIFO, TestTrimExclusive) {
std::unique_ptr<RCf::FIFO> f;
- auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield);
+ auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield);
ASSERT_EQ(0, r);
std::vector<RCf::list_entry> result;
bool more = false;
for (uint32_t i = 0; i < max_entries; ++i) {
cb::list bl;
encode(i, bl);
- f->push(bl, null_yield);
+ f->push(&dp, bl, null_yield);
}
- f->list(1, std::nullopt, &result, &more, null_yield);
+ f->list(&dp, 1, std::nullopt, &result, &more, null_yield);
auto [val, marker] = decode_entry<std::uint32_t>(result.front());
ASSERT_EQ(0, val);
- f->trim(marker, true, null_yield);
+ f->trim(&dp, marker, true, null_yield);
result.clear();
- f->list(max_entries, std::nullopt, &result, &more, null_yield);
+ f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield);
std::tie(val, marker) = decode_entry<std::uint32_t>(result.front());
ASSERT_EQ(0, val);
- f->trim(result[4].marker, true, null_yield);
+ f->trim(&dp, result[4].marker, true, null_yield);
result.clear();
- f->list(max_entries, std::nullopt, &result, &more, null_yield);
+ f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield);
std::tie(val, marker) = decode_entry<std::uint32_t>(result.front());
ASSERT_EQ(4, val);
- f->trim(result.back().marker, true, null_yield);
+ f->trim(&dp, result.back().marker, true, null_yield);
result.clear();
- f->list(max_entries, std::nullopt, &result, &more, null_yield);
+ f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield);
std::tie(val, marker) = decode_entry<std::uint32_t>(result.front());
ASSERT_EQ(result.size(), 1);
ASSERT_EQ(max_entries - 1, val);
TEST_F(AioLegacyFIFO, TestPushListTrim)
{
std::unique_ptr<RCf::FIFO> f;
- auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield);
+ auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield);
ASSERT_EQ(0, r);
static constexpr auto max_entries = 10u;
for (uint32_t i = 0; i < max_entries; ++i) {
cb::list bl;
encode(i, bl);
auto c = R::Rados::aio_create_completion();
- f->push(bl, c);
+ f->push(&dp, bl, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
bool more = false;
for (auto i = 0u; i < max_entries; ++i) {
auto c = R::Rados::aio_create_completion();
- f->list(1, marker, &result, &more, c);
+ f->list(&dp, 1, marker, &result, &more, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
std::string markers[max_entries];
std::uint32_t min_entry = 0;
auto c = R::Rados::aio_create_completion();
- f->list(max_entries * 10, std::nullopt, &result, &more, c);
+ f->list(&dp, max_entries * 10, std::nullopt, &result, &more, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
/* trim one entry */
c = R::Rados::aio_create_completion();
- f->trim(markers[min_entry], false, c);
+ f->trim(&dp, markers[min_entry], false, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
++min_entry;
c = R::Rados::aio_create_completion();
- f->list(max_entries * 10, std::nullopt, &result, &more, c);
+ f->list(&dp, max_entries * 10, std::nullopt, &result, &more, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
static constexpr auto max_entry_size = 128ull;
std::unique_ptr<RCf::FIFO> f;
- auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt,
+ auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt,
std::nullopt, false, max_part_size, max_entry_size);
ASSERT_EQ(0, r);
bl.append(buf, sizeof(buf));
auto c = R::Rados::aio_create_completion();
- f->push(bl, c);
+ f->push(&dp, bl, c);
c->wait_for_complete();
r = c->get_return_value();
ASSERT_EQ(-E2BIG, r);
c->release();
c = R::Rados::aio_create_completion();
- f->push(std::vector<cb::list>{}, c);
+ f->push(&dp, std::vector<cb::list>{}, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
static constexpr auto max_part_size = 2048ull;
static constexpr auto max_entry_size = 128ull;
std::unique_ptr<RCf::FIFO> f;
- auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt,
+ auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt,
std::nullopt, false, max_part_size,
max_entry_size);
ASSERT_EQ(0, r);
{
auto c = R::Rados::aio_create_completion();
- f->get_head_info([&](int r, RCf::part_info&& p) {
+ f->get_head_info(&dp, [&](int r, RCf::part_info&& p) {
ASSERT_TRUE(p.tag.empty());
ASSERT_EQ(0, p.magic);
ASSERT_EQ(0, p.min_ofs);
*(int *)buf = i;
bl.append(buf, sizeof(buf));
auto c = R::Rados::aio_create_completion();
- f->push(bl, c);
+ f->push(&dp, bl, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
std::vector<RCf::list_entry> result;
bool more = false;
auto c = R::Rados::aio_create_completion();
- f->list(max_entries, std::nullopt, &result, &more, c);
+ f->list(&dp, max_entries, std::nullopt, &result, &more, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
for (auto i = 0u; i < max_entries; ++i) {
c = R::Rados::aio_create_completion();
- f->list(1, marker, &result, &more, c);
+ f->list(&dp, 1, marker, &result, &more, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
for (auto i = 0u; i < max_entries; ++i) {
/* read single entry */
c = R::Rados::aio_create_completion();
- f->list(1, marker, &result, &more, c);
+ f->list(&dp, 1, marker, &result, &more, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
marker = result.front().marker;
c = R::Rados::aio_create_completion();
- f->trim(*marker, false, c);
+ f->trim(&dp, *marker, false, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
/* try to read all again, see how many entries left */
c = R::Rados::aio_create_completion();
- f->list(max_entries, marker, &result, &more, c);
+ f->list(&dp, max_entries, marker, &result, &more, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
ASSERT_EQ(0, r);
c = R::Rados::aio_create_completion();
- f->get_head_info([&](int r, RCf::part_info&& p) {
+ f->get_head_info(&dp, [&](int r, RCf::part_info&& p) {
ASSERT_EQ(next_ofs, p.next_ofs);
}, c);
c->wait_for_complete();
static constexpr auto max_entry_size = 128ull;
std::unique_ptr<RCf::FIFO> f;
- auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt,
+ auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt,
std::nullopt, false, max_part_size,
max_entry_size);
ASSERT_EQ(0, r);
(max_entry_size + part_entry_overhead));
const auto max_entries = entries_per_part * 4 + 1;
std::unique_ptr<RCf::FIFO> f2;
- r = RCf::FIFO::open(ioctx, fifo_id, &f2, null_yield);
+ r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield);
std::vector fifos{&f, &f2};
for (auto i = 0u; i < max_entries; ++i) {
bl.append(buf, sizeof(buf));
auto& f = *fifos[i % fifos.size()];
auto c = R::Rados::aio_create_completion();
- f->push(bl, c);
+ f->push(&dp, bl, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
std::vector<RCf::list_entry> result;
bool more = false;
auto c = R::Rados::aio_create_completion();
- f2->list(max_entries, std::nullopt, &result, &more, c);
+ f2->list(&dp, max_entries, std::nullopt, &result, &more, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
ASSERT_EQ(max_entries, result.size());
c = R::Rados::aio_create_completion();
- f2->list(max_entries, std::nullopt, &result, &more, c);
+ f2->list(&dp, max_entries, std::nullopt, &result, &more, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
static constexpr auto max_part_size = 2048ull;
static constexpr auto max_entry_size = 128ull;
std::unique_ptr<RCf::FIFO> f1;
- auto r = RCf::FIFO::create(ioctx, fifo_id, &f1, null_yield, std::nullopt,
+ auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f1, null_yield, std::nullopt,
std::nullopt, false, max_part_size,
max_entry_size);
ASSERT_EQ(0, r);
const auto max_entries = entries_per_part * 4 + 1;
std::unique_ptr<RCf::FIFO> f2;
- r = RCf::FIFO::open(ioctx, fifo_id, &f2, null_yield);
+ r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield);
ASSERT_EQ(0, r);
/* push one entry to f2 and the rest to f1 */
bl.append(buf, sizeof(buf));
auto& f = (i < 1 ? f2 : f1);
auto c = R::Rados::aio_create_completion();
- f->push(bl, c);
+ f->push(&dp, bl, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
std::vector<RCf::list_entry> result;
bool more = false;
auto c = R::Rados::aio_create_completion();
- f1->list(num, std::nullopt, &result, &more, c);
+ f1->list(&dp, num, std::nullopt, &result, &more, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
auto& entry = result[num - 1];
marker = entry.marker;
c = R::Rados::aio_create_completion();
- f1->trim(marker, false, c);
+ f1->trim(&dp, marker, false, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
const auto left = max_entries - num;
c = R::Rados::aio_create_completion();
- f2->list(left, marker, &result, &more, c);
+ f2->list(&dp, left, marker, &result, &more, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
static constexpr auto max_entry_size = 128ull;
std::unique_ptr<RCf::FIFO> f;
- auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt,
+ auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt,
std::nullopt, false, max_part_size,
max_entry_size);
ASSERT_EQ(0, r);
ASSERT_EQ(max_entries, bufs.size());
auto c = R::Rados::aio_create_completion();
- f->push(bufs, c);
+ f->push(&dp, bufs, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
std::vector<RCf::list_entry> result;
bool more = false;
c = R::Rados::aio_create_completion();
- f->list(max_entries, std::nullopt, &result, &more, c);
+ f->list(&dp, max_entries, std::nullopt, &result, &more, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
TEST_F(LegacyFIFO, TrimAll)
{
std::unique_ptr<RCf::FIFO> f;
- auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield);
+ auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield);
ASSERT_EQ(0, r);
static constexpr auto max_entries = 10u;
for (uint32_t i = 0; i < max_entries; ++i) {
cb::list bl;
encode(i, bl);
- r = f->push(bl, null_yield);
+ r = f->push(&dp, bl, null_yield);
ASSERT_EQ(0, r);
}
/* trim one entry */
- r = f->trim(RCf::marker::max().to_string(), false, null_yield);
+ r = f->trim(&dp, RCf::marker::max().to_string(), false, null_yield);
ASSERT_EQ(-ENODATA, r);
std::vector<RCf::list_entry> result;
bool more;
- r = f->list(1, std::nullopt, &result, &more, null_yield);
+ r = f->list(&dp, 1, std::nullopt, &result, &more, null_yield);
ASSERT_EQ(0, r);
ASSERT_TRUE(result.empty());
}
TEST_F(LegacyFIFO, AioTrimAll)
{
std::unique_ptr<RCf::FIFO> f;
- auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield);
+ auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield);
ASSERT_EQ(0, r);
static constexpr auto max_entries = 10u;
for (uint32_t i = 0; i < max_entries; ++i) {
cb::list bl;
encode(i, bl);
- r = f->push(bl, null_yield);
+ r = f->push(&dp, bl, null_yield);
ASSERT_EQ(0, r);
}
auto c = R::Rados::aio_create_completion();
- f->trim(RCf::marker::max().to_string(), false, c);
+ f->trim(&dp, RCf::marker::max().to_string(), false, c);
c->wait_for_complete();
r = c->get_return_value();
c->release();
std::vector<RCf::list_entry> result;
bool more;
- r = f->list(1, std::nullopt, &result, &more, null_yield);
+ r = f->list(&dp, 1, std::nullopt, &result, &more, null_yield);
ASSERT_EQ(0, r);
ASSERT_TRUE(result.empty());
}
namespace fifo = rados::cls::fifo;
namespace RCf = rgw::cls::fifo;
+auto cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT);
+const DoutPrefix dp(cct, 1, "test log backing: ");
+
class LogBacking : public testing::Test {
protected:
static constexpr int SHARDS = 3;
cb::list bl;
encode(i, bl);
cls_log_add(op, ceph_clock_now(), {}, "meow", bl);
- auto r = rgw_rados_operate(ioctx, get_oid(0, i), &op, null_yield);
+ auto r = rgw_rados_operate(&dp, ioctx, get_oid(0, i), &op, null_yield);
ASSERT_GE(r, 0);
}
}
cb::list bl;
encode(i, bl);
cls_log_add(op, ceph_clock_now(), {}, "meow", bl);
- auto r = rgw_rados_operate(ioctx, get_oid(0, i), &op, null_yield);
+ auto r = rgw_rados_operate(&dp, ioctx, get_oid(0, i), &op, null_yield);
ASSERT_GE(r, 0);
}
std::list<cls_log_entry> entries;
bool truncated = false;
cls_log_list(op, {}, {}, {}, 1, entries, &to_marker, &truncated);
- auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, null_yield);
+ auto r = rgw_rados_operate(&dp, ioctx, oid, &op, nullptr, null_yield);
ASSERT_GE(r, 0);
ASSERT_FALSE(entries.empty());
}
{
lr::ObjectWriteOperation op;
cls_log_trim(op, {}, {}, {}, to_marker);
- auto r = rgw_rados_operate(ioctx, oid, &op, null_yield);
+ auto r = rgw_rados_operate(&dp, ioctx, oid, &op, null_yield);
ASSERT_GE(r, 0);
}
{
std::list<cls_log_entry> entries;
bool truncated = false;
cls_log_list(op, {}, {}, {}, 1, entries, &to_marker, &truncated);
- auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, null_yield);
+ auto r = rgw_rados_operate(&dp, ioctx, oid, &op, nullptr, null_yield);
ASSERT_GE(r, 0);
ASSERT_TRUE(entries.empty());
}
{
for (int i = 0; i < SHARDS; ++i) {
std::unique_ptr<RCf::FIFO> fifo;
- auto r = RCf::FIFO::create(ioctx, get_oid(0, i), &fifo, null_yield);
+ auto r = RCf::FIFO::create(&dp, ioctx, get_oid(0, i), &fifo, null_yield);
ASSERT_EQ(0, r);
ASSERT_TRUE(fifo);
}
{
using ceph::encode;
std::unique_ptr<RCf::FIFO> fifo;
- auto r = RCf::FIFO::open(ioctx, get_oid(0, i), &fifo, null_yield);
+ auto r = RCf::FIFO::open(&dp, ioctx, get_oid(0, i), &fifo, null_yield);
ASSERT_GE(0, r);
ASSERT_TRUE(fifo);
cb::list bl;
encode(i, bl);
- r = fifo->push(bl, null_yield);
+ r = fifo->push(&dp, bl, null_yield);
ASSERT_GE(0, r);
}
TEST_F(LogBacking, TestOmap)
{
make_omap();
- auto stat = log_backing_type(ioctx, log_type::fifo, SHARDS,
+ auto stat = log_backing_type(&dp, ioctx, log_type::fifo, SHARDS,
[this](int shard){ return get_oid(0, shard); },
null_yield);
ASSERT_EQ(log_type::omap, *stat);
TEST_F(LogBacking, TestOmapEmpty)
{
- auto stat = log_backing_type(ioctx, log_type::omap, SHARDS,
+ auto stat = log_backing_type(&dp, ioctx, log_type::omap, SHARDS,
[this](int shard){ return get_oid(0, shard); },
null_yield);
ASSERT_EQ(log_type::omap, *stat);
TEST_F(LogBacking, TestFIFO)
{
make_fifo();
- auto stat = log_backing_type(ioctx, log_type::fifo, SHARDS,
+ auto stat = log_backing_type(&dp, ioctx, log_type::fifo, SHARDS,
[this](int shard){ return get_oid(0, shard); },
null_yield);
ASSERT_EQ(log_type::fifo, *stat);
TEST_F(LogBacking, TestFIFOEmpty)
{
- auto stat = log_backing_type(ioctx, log_type::fifo, SHARDS,
+ auto stat = log_backing_type(&dp, ioctx, log_type::fifo, SHARDS,
[this](int shard){ return get_oid(0, shard); },
null_yield);
ASSERT_EQ(log_type::fifo, *stat);
TEST_F(LogBacking, GenerationSingle)
{
auto lgr = logback_generations::init<generations>(
- ioctx, "foobar", [this](uint64_t gen_id, int shard) {
+ &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) {
return get_oid(gen_id, shard);
}, SHARDS, log_type::fifo, null_yield);
ASSERT_TRUE(lgr);
ASSERT_EQ(log_type::fifo, lg->got_entries[0].type);
ASSERT_FALSE(lg->got_entries[0].pruned);
- auto ec = lg->empty_to(0, null_yield);
+ auto ec = lg->empty_to(&dp, 0, null_yield);
ASSERT_TRUE(ec);
lg.reset();
lg = *logback_generations::init<generations>(
- ioctx, "foobar", [this](uint64_t gen_id, int shard) {
+ &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) {
return get_oid(gen_id, shard);
}, SHARDS, log_type::fifo, null_yield);
lg->got_entries.clear();
- ec = lg->new_backing(log_type::omap, null_yield);
+ ec = lg->new_backing(&dp, log_type::omap, null_yield);
ASSERT_FALSE(ec);
ASSERT_EQ(1, lg->got_entries.size());
lg.reset();
lg = *logback_generations::init<generations>(
- ioctx, "foobar", [this](uint64_t gen_id, int shard) {
+ &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) {
return get_oid(gen_id, shard);
}, SHARDS, log_type::fifo, null_yield);
ASSERT_EQ(log_type::omap, lg->got_entries[1].type);
ASSERT_FALSE(lg->got_entries[1].pruned);
- ec = lg->empty_to(0, null_yield);
+ ec = lg->empty_to(&dp, 0, null_yield);
ASSERT_FALSE(ec);
ASSERT_EQ(0, *lg->tail);
lg.reset();
lg = *logback_generations::init<generations>(
- ioctx, "foobar", [this](uint64_t gen_id, int shard) {
+ &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) {
return get_oid(gen_id, shard);
}, SHARDS, log_type::fifo, null_yield);
ASSERT_EQ(log_type::omap, lg->got_entries[1].type);
ASSERT_FALSE(lg->got_entries[1].pruned);
- ec = lg->remove_empty(null_yield);
+ ec = lg->remove_empty(&dp, null_yield);
ASSERT_FALSE(ec);
auto entries = lg->entries();
TEST_F(LogBacking, GenerationWN)
{
auto lg1 = *logback_generations::init<generations>(
- ioctx, "foobar", [this](uint64_t gen_id, int shard) {
+ &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) {
return get_oid(gen_id, shard);
}, SHARDS, log_type::fifo, null_yield);
- auto ec = lg1->new_backing(log_type::omap, null_yield);
+ auto ec = lg1->new_backing(&dp, log_type::omap, null_yield);
ASSERT_FALSE(ec);
ASSERT_EQ(1, lg1->got_entries.size());
lg1->got_entries.clear();
auto lg2 = *logback_generations::init<generations>(
- ioctx2, "foobar", [this](uint64_t gen_id, int shard) {
+ &dp, ioctx2, "foobar", [this](uint64_t gen_id, int shard) {
return get_oid(gen_id, shard);
}, SHARDS, log_type::fifo, null_yield);
lg2->got_entries.clear();
- ec = lg1->new_backing(log_type::fifo, null_yield);
+ ec = lg1->new_backing(&dp, log_type::fifo, null_yield);
ASSERT_FALSE(ec);
ASSERT_EQ(1, lg1->got_entries.size());
lg1->got_entries.clear();
lg2->got_entries.clear();
- ec = lg2->empty_to(1, null_yield);
+ ec = lg2->empty_to(&dp, 1, null_yield);
ASSERT_FALSE(ec);
ASSERT_EQ(1, *lg1->tail);
return 0;
}
- virtual int read_stats(optional_yield y, RGWStorageStats* stats, ceph::real_time *last_stats_sync, ceph::real_time *last_stats_update) override {
+ virtual int read_stats(const DoutPrefixProvider *dpp, optional_yield y, RGWStorageStats* stats, ceph::real_time *last_stats_sync, ceph::real_time *last_stats_update) override {
return 0;
}
- virtual int read_stats_async(RGWGetUserStats_CB *cb) override {
+ virtual int read_stats_async(const DoutPrefixProvider *dpp, RGWGetUserStats_CB *cb) override {
return 0;
}
- virtual int complete_flush_stats(optional_yield y) override {
+ virtual int complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y) override {
return 0;
}
- virtual int read_usage(uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool *is_truncated, RGWUsageIter& usage_iter, map<rgw_user_bucket, rgw_usage_log_entry>& usage) override {
+ virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool *is_truncated, RGWUsageIter& usage_iter, map<rgw_user_bucket, rgw_usage_log_entry>& usage) override {
return 0;
}
- virtual int trim_usage(uint64_t start_epoch, uint64_t end_epoch) override {
+ virtual int trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) override {
return 0;
}
using namespace std;
+auto cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT);
+const DoutPrefix dp(cct, 1, "test rgw manifest: ");
+
struct OldObjManifestPart {
old_rgw_obj loc; /* the object where the data is located */
uint64_t loc_ofs; /* the offset at that object where the data is located */
list<rgw_obj>::iterator liter;
RGWObjManifest::obj_iterator iter;
- for (iter = manifest.obj_begin(), liter = objs.begin();
- iter != manifest.obj_end() && liter != objs.end();
+ for (iter = manifest.obj_begin(&dp), liter = objs.begin();
+ iter != manifest.obj_end(&dp) && liter != objs.end();
++iter, ++liter) {
ASSERT_TRUE(env.get_raw(*liter) == env.get_raw(iter.get_location()));
}
- ASSERT_TRUE(iter == manifest.obj_end());
+ ASSERT_TRUE(iter == manifest.obj_end(&dp));
ASSERT_TRUE(liter == objs.end());
rgw_raw_obj raw_head;
- iter = manifest.obj_find(100 * 1024);
+ iter = manifest.obj_find(&dp, 100 * 1024);
ASSERT_TRUE(env.get_raw(iter.get_location()) == env.get_raw(head));
ASSERT_EQ((int)iter.get_stripe_size(), obj_size);
}
rgw_obj_select last_obj;
RGWObjManifest::obj_iterator iter;
- for (iter = manifest.obj_begin(), liter = objs.begin();
- iter != manifest.obj_end() && liter != objs.end();
+ for (iter = manifest.obj_begin(&dp), liter = objs.begin();
+ iter != manifest.obj_end(&dp) && liter != objs.end();
++iter, ++liter) {
cout << "*liter=" << *liter << " iter.get_location()=" << env.get_raw(iter.get_location()) << std::endl;
ASSERT_TRUE(env.get_raw(*liter) == env.get_raw(iter.get_location()));
last_obj = iter.get_location();
}
- ASSERT_TRUE(iter == manifest.obj_end());
+ ASSERT_TRUE(iter == manifest.obj_end(&dp));
ASSERT_TRUE(liter == objs.end());
- iter = manifest.obj_find(100 * 1024);
+ iter = manifest.obj_find(&dp, 100 * 1024);
ASSERT_TRUE(env.get_raw(iter.get_location()) == env.get_raw(head));
ASSERT_EQ((int)iter.get_stripe_size(), head_size);
uint64_t ofs = 20 * 1024 * 1024 + head_size;
- iter = manifest.obj_find(ofs + 100);
+ iter = manifest.obj_find(&dp, ofs + 100);
ASSERT_TRUE(env.get_raw(iter.get_location()) == env.get_raw(last_obj));
ASSERT_EQ(iter.get_stripe_ofs(), ofs);
RGWObjManifest m;
for (int i = 0; i < num_parts; i++) {
- m.append(pm[i], env.zonegroup, env.zone_params);
+ m.append(&dp, pm[i], env.zonegroup, env.zone_params);
}
RGWObjManifest::obj_iterator iter;
- for (iter = m.obj_begin(); iter != m.obj_end(); ++iter) {
- RGWObjManifest::obj_iterator fiter = m.obj_find(iter.get_ofs());
+ for (iter = m.obj_begin(&dp); iter != m.obj_end(&dp); ++iter) {
+ RGWObjManifest::obj_iterator fiter = m.obj_find(&dp, iter.get_ofs());
ASSERT_TRUE(env.get_raw(fiter.get_location()) == env.get_raw(iter.get_location()));
}
RGWObjManifest::obj_iterator iter;
auto liter = old_objs.begin();
- for (iter = manifest.obj_begin();
- iter != manifest.obj_end() && liter != old_objs.end();
+ for (iter = manifest.obj_begin(&dp);
+ iter != manifest.obj_end(&dp) && liter != old_objs.end();
++iter, ++liter) {
rgw_pool old_pool(liter->bucket.data_pool);
string old_oid;
}
ASSERT_TRUE(liter == old_objs.end());
- ASSERT_TRUE(iter == manifest.obj_end());
+ ASSERT_TRUE(iter == manifest.obj_end(&dp));
}
// mock puller that throws an exception if it's called
struct ErrorPuller : public RGWPeriodHistory::Puller {
- int pull(const std::string& id, RGWPeriod& period, optional_yield) override {
+ int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield) override {
throw std::runtime_error("unexpected call to pull");
}
};
public:
explicit RecordingPuller(int error) : error(error) {}
Ids ids;
- int pull(const std::string& id, RGWPeriod& period, optional_yield) override {
+ int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield) override {
ids.push_back(id);
return error;
}
// mock puller that returns a fake period by parsing the period id
struct NumericPuller : public RGWPeriodHistory::Puller {
- int pull(const std::string& id, RGWPeriod& period, optional_yield) override {
+ int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield) override {
// relies on numeric period ids to divine the realm_epoch
auto realm_epoch = boost::lexical_cast<epoch_t>(id);
auto predecessor = boost::lexical_cast<std::string>(realm_epoch-1);
{
RecordingPuller puller{-EFAULT};
RGWPeriodHistory history(g_ceph_context, &puller, current_period);
+ const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: ");
// create a disjoint history at 1 and verify that periods are requested
// backwards from current_period
- auto c1 = history.attach(make_period("1", 1, ""), null_yield);
+ auto c1 = history.attach(&dp, make_period("1", 1, ""), null_yield);
ASSERT_FALSE(c1);
ASSERT_EQ(-EFAULT, c1.get_error());
ASSERT_EQ(Ids{"4"}, puller.ids);
auto c4 = history.insert(make_period("4", 4, "3"));
ASSERT_TRUE(c4);
- c1 = history.attach(make_period("1", 1, ""), null_yield);
+ c1 = history.attach(&dp, make_period("1", 1, ""), null_yield);
ASSERT_FALSE(c1);
ASSERT_EQ(-EFAULT, c1.get_error());
ASSERT_EQ(Ids({"4", "3"}), puller.ids);
auto c3 = history.insert(make_period("3", 3, "2"));
ASSERT_TRUE(c3);
- c1 = history.attach(make_period("1", 1, ""), null_yield);
+ c1 = history.attach(&dp, make_period("1", 1, ""), null_yield);
ASSERT_FALSE(c1);
ASSERT_EQ(-EFAULT, c1.get_error());
ASSERT_EQ(Ids({"4", "3", "2"}), puller.ids);
auto c2 = history.insert(make_period("2", 2, "1"));
ASSERT_TRUE(c2);
- c1 = history.attach(make_period("1", 1, ""), null_yield);
+ c1 = history.attach(&dp, make_period("1", 1, ""), null_yield);
ASSERT_TRUE(c1);
ASSERT_EQ(Ids({"4", "3", "2"}), puller.ids);
}
{
RecordingPuller puller{-EFAULT};
RGWPeriodHistory history(g_ceph_context, &puller, current_period);
+ const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: ");
// create a disjoint history at 9 and verify that periods are requested
// backwards down to current_period
- auto c9 = history.attach(make_period("9", 9, "8"), null_yield);
+ auto c9 = history.attach(&dp, make_period("9", 9, "8"), null_yield);
ASSERT_FALSE(c9);
ASSERT_EQ(-EFAULT, c9.get_error());
ASSERT_EQ(Ids{"8"}, puller.ids);
- auto c8 = history.attach(make_period("8", 8, "7"), null_yield);
+ auto c8 = history.attach(&dp, make_period("8", 8, "7"), null_yield);
ASSERT_FALSE(c8);
ASSERT_EQ(-EFAULT, c8.get_error());
ASSERT_EQ(Ids({"8", "7"}), puller.ids);
- auto c7 = history.attach(make_period("7", 7, "6"), null_yield);
+ auto c7 = history.attach(&dp, make_period("7", 7, "6"), null_yield);
ASSERT_FALSE(c7);
ASSERT_EQ(-EFAULT, c7.get_error());
ASSERT_EQ(Ids({"8", "7", "6"}), puller.ids);
- auto c6 = history.attach(make_period("6", 6, "5"), null_yield);
+ auto c6 = history.attach(&dp, make_period("6", 6, "5"), null_yield);
ASSERT_TRUE(c6);
ASSERT_EQ(Ids({"8", "7", "6"}), puller.ids);
}
{
NumericPuller puller;
RGWPeriodHistory history(g_ceph_context, &puller, current_period);
+ const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: ");
- auto c1 = history.attach(make_period("1", 1, ""), null_yield);
+ auto c1 = history.attach(&dp, make_period("1", 1, ""), null_yield);
ASSERT_TRUE(c1);
// verify that we pulled and merged all periods from 1-5
{
NumericPuller puller;
RGWPeriodHistory history(g_ceph_context, &puller, current_period);
+ const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: ");
- auto c9 = history.attach(make_period("9", 9, "8"), null_yield);
+ auto c9 = history.attach(&dp, make_period("9", 9, "8"), null_yield);
ASSERT_TRUE(c9);
// verify that we pulled and merged all periods from 5-9
protected:
RGWSI_RADOS::Obj make_obj(const std::string& oid) {
auto obj = RadosEnv::rados->obj({{RadosEnv::poolname}, oid});
- ceph_assert_always(0 == obj.open());
+ const NoDoutPrefix no_dpp(g_ceph_context, 1);
+ ceph_assert_always(0 == obj.open(&no_dpp));
return obj;
}
};
RGWCORSRule *xml_to_cors_rule(string s){
RGWCORSConfiguration_S3 *cors_config;
- RGWCORSXMLParser_S3 parser(g_ceph_context);
+ const DoutPrefix dp(g_ceph_context, 1, "test cors: ");
+ RGWCORSXMLParser_S3 parser(&dp, g_ceph_context);
const string *data = g_test->get_response_data();
if (!parser.init()) {
return NULL;