#define dout_prefix *_dout
// Commands shared between OSD's console and admin console:
-namespace ceph {
-namespace osd_cmds {
+namespace ceph::osd_cmds {
int heap(CephContext& cct, const cmdmap_t& cmdmap, Formatter& f, std::ostream& os);
-
-}} // namespace ceph::osd_cmds
+
+} // namespace ceph::osd_cmds
int OSD::mkfs(CephContext *cct, ObjectStore *store, uuid_d fsid, int whoami, string osdspec_affinity)
{
{
if (!gss_ktfile_client.empty()) {
- // Assert we can export environment variable
- /*
+ // Assert we can export environment variable
+ /*
The default client keytab is used, if it is present and readable,
to automatically obtain initial credentials for GSSAPI client
applications. The principal name of the first entry in the client
2. The default_client_keytab_name profile variable in [libdefaults].
3. The hardcoded default, DEFCKTNAME.
*/
- const int32_t set_result(setenv("KRB5_CLIENT_KTNAME",
+ const int32_t set_result(setenv("KRB5_CLIENT_KTNAME",
gss_ktfile_client.c_str(), 1));
ceph_assert(set_result == 0);
}
store->compact();
auto end = ceph::coarse_mono_clock::now();
double duration = std::chrono::duration<double>(end-start).count();
- dout(1) << "finished manual compaction in "
+ dout(1) << "finished manual compaction in "
<< duration
<< " seconds" << dendl;
f->open_object_section("compact_result");
requested_full_first = requested_full_last = 0;
return;
}
-
+
requested_full_first = e + 1;
dout(10) << __func__ << " " << e << ", requested " << requested_full_first
service.release_map(nextmap);
}
}
- OID_EVENT_TRACE_WITH_MSG(m, "MS_FAST_DISPATCH_END", false);
+ OID_EVENT_TRACE_WITH_MSG(m, "MS_FAST_DISPATCH_END", false);
}
int OSD::ms_handle_authentication(Connection *con)
time_permit = true;
}
}
- if (!time_permit) {
+ if (time_permit) {
dout(20) << __func__ << " should run between " << cct->_conf->osd_scrub_begin_hour
<< " - " << cct->_conf->osd_scrub_end_hour
- << " now " << bdt.tm_hour << " = no" << dendl;
+ << " now " << bdt.tm_hour << " = yes" << dendl;
} else {
dout(20) << __func__ << " should run between " << cct->_conf->osd_scrub_begin_hour
<< " - " << cct->_conf->osd_scrub_end_hour
- << " now " << bdt.tm_hour << " = yes" << dendl;
+ << " now " << bdt.tm_hour << " = no" << dendl;
}
return time_permit;
}
OSDService::ScrubJob scrub_job;
if (service.first_scrub_stamp(&scrub_job)) {
do {
- dout(30) << "sched_scrub examine " << scrub.pgid << " at " << scrub.sched_time << dendl;
+ dout(30) << "sched_scrub examine " << scrub_job.pgid << " at " << scrub_job.sched_time << dendl;
if (scrub_job.sched_time > now) {
// save ourselves some effort
set<int> avoid_ports;
#if defined(__FreeBSD__)
// prevent FreeBSD from grabbing the client_messenger port during
- // rebinding. In which case a cluster_meesneger will connect also
+  // rebinding. In which case a cluster_messenger will connect also
// to the same port
client_messenger->get_myaddrs().get_ports(&avoid_ports);
#endif
// This is true for the first recovery op and when the previous recovery op
// has been scheduled in the past. The next recovery op is scheduled after
// completing the sleep from now.
-
+
if (auto now = ceph::real_clock::now();
service.recovery_schedule_time < now) {
service.recovery_schedule_time = now;
#endif
bool do_unfound = pg->start_recovery_ops(reserved_pushes, handle, &started);
- dout(10) << "do_recovery started " << started << "/" << reserved_pushes
+ dout(10) << "do_recovery started " << started << "/" << reserved_pushes
<< " on " << *pg << dendl;
if (do_unfound) {
<< cct->_conf->osd_pg_epoch_persisted_max_stale << ")";
}
if (cct->_conf->osd_object_clean_region_max_num_intervals < 0) {
- clog->warn() << "osd_object_clean_region_max_num_intervals ("
+ clog->warn() << "osd_object_clean_region_max_num_intervals ("
<< cct->_conf->osd_object_clean_region_max_num_intervals
<< ") is < 0";
}
sdata->sdata_cond.notify_one();
}
-namespace ceph {
-namespace osd_cmds {
+namespace ceph::osd_cmds {
int heap(CephContext& cct, const cmdmap_t& cmdmap, Formatter& f,
std::ostream& os)
os << "could not issue heap profiler command -- not using tcmalloc!";
return -EOPNOTSUPP;
}
-
+
string cmd;
if (!cmd_getval(cmdmap, "heapcmd", cmd)) {
os << "unable to get value for command \"" << cmd << "\"";
return -EINVAL;
}
-
+
std::vector<std::string> cmd_vec;
get_str_vec(cmd, cmd_vec);
if (cmd_getval(cmdmap, "value", val)) {
cmd_vec.push_back(val);
}
-
+
ceph_heap_profiler_handle_command(cmd_vec, os);
-
+
return 0;
}
-
-}} // namespace ceph::osd_cmds
+
+} // namespace ceph::osd_cmds
if (candidate >= next) {
break;
}
-
+
if (response.entries.size() == list_size) {
next = candidate;
break;
// mds should have stopped writing before this point.
// We can't allow OSD to become non-startable even if mds
// could be writing as part of file removals.
- if (write_ordered && osd->check_failsafe_full(get_dpp()) &&
+ if (write_ordered && osd->check_failsafe_full(get_dpp()) &&
!m->has_flag(CEPH_OSD_FLAG_FULL_TRY)) {
dout(10) << __func__ << " fail-safe full check failed, dropping request." << dendl;
return;
// make sure locator is consistent
object_locator_t oloc(obc->obs.oi.soid);
if (m->get_object_locator() != oloc) {
- dout(10) << " provided locator " << m->get_object_locator()
+ dout(10) << " provided locator " << m->get_object_locator()
<< " != object's " << obc->obs.oi.soid << dendl;
- osd->clog->warn() << "bad locator " << m->get_object_locator()
+ osd->clog->warn() << "bad locator " << m->get_object_locator()
<< " on object " << oloc
<< " op " << *m;
}
OSDOp& osd_op = *p;
ceph_osd_op& op = osd_op.op;
if (op.op == CEPH_OSD_OP_SET_REDIRECT ||
- op.op == CEPH_OSD_OP_SET_CHUNK ||
+ op.op == CEPH_OSD_OP_SET_CHUNK ||
op.op == CEPH_OSD_OP_UNSET_MANIFEST ||
op.op == CEPH_OSD_OP_TIER_PROMOTE ||
op.op == CEPH_OSD_OP_TIER_FLUSH ||
op.op == CEPH_OSD_OP_TIER_EVICT) {
return cache_result_t::NOOP;
- }
+ }
}
switch (obc->obs.oi.manifest.type) {
if (op->may_write() || write_ordered) {
do_proxy_write(op, obc);
} else {
- // promoted object
+ // promoted object
if (obc->obs.oi.size != 0) {
return cache_result_t::NOOP;
}
do_proxy_read(op, obc);
}
return cache_result_t::HANDLED_PROXY;
- case object_manifest_t::TYPE_CHUNKED:
+ case object_manifest_t::TYPE_CHUNKED:
{
if (can_proxy_chunked_read(op, obc)) {
map<hobject_t,FlushOpRef>::iterator p = flush_ops.find(obc->obs.oi.soid);
if (!check_laggy_requeue(op)) {
return cache_result_t::BLOCKED_RECOVERY;
}
-
+
for (auto& p : obc->obs.oi.manifest.chunk_map) {
if (p.second.is_missing()) {
auto m = op->get_req<MOSDOp>();
do_proxy_write(op);
// Promote too?
- if (!op->need_skip_promote() &&
+ if (!op->need_skip_promote() &&
maybe_promote(obc, missing_oid, oloc, in_hit_set,
pool.info.min_write_recency_for_promote,
OpRequestRef(),
prdop->ops[op_index].outdata.begin(copy_offset).copy_in(
obj_op->ops[0].outdata.length(),
obj_op->ops[0].outdata.c_str());
- }
-
+ }
+
pg->finish_proxy_read(oid, tid, r);
pg->osd->logger->tinc(l_osd_tier_r_lat, ceph_clock_now() - start);
if (obj_op) {
switch (obc->obs.oi.manifest.type) {
case object_manifest_t::TYPE_REDIRECT:
oloc = object_locator_t(obc->obs.oi.manifest.redirect_target);
- soid = obc->obs.oi.manifest.redirect_target;
+ soid = obc->obs.oi.manifest.redirect_target;
break;
default:
ceph_abort_msg("unrecognized manifest type");
switch (obc->obs.oi.manifest.type) {
case object_manifest_t::TYPE_REDIRECT:
oloc = object_locator_t(obc->obs.oi.manifest.redirect_target);
- soid = obc->obs.oi.manifest.redirect_target;
+ soid = obc->obs.oi.manifest.redirect_target;
break;
default:
ceph_abort_msg("unrecognized manifest type");
in_progress_proxy_ops[soid].push_back(op);
}
-void PrimaryLogPG::do_proxy_chunked_op(OpRequestRef op, const hobject_t& missing_oid,
+void PrimaryLogPG::do_proxy_chunked_op(OpRequestRef op, const hobject_t& missing_oid,
ObjectContextRef obc, bool write_ordered)
{
MOSDOp *m = static_cast<MOSDOp*>(op->get_nonconst_req());
chunk_index = 0;
chunk_length = 0;
/* find the right chunk position for cursor */
- for (auto &p : manifest->chunk_map) {
- if (p.first <= cursor && p.first + p.second.length > cursor) {
- chunk_length = p.second.length;
- chunk_index = p.first;
+ for (auto &p : manifest->chunk_map) {
+ if (p.first <= cursor && p.first + p.second.length > cursor) {
+ chunk_length = p.second.length;
+ chunk_index = p.first;
break;
}
- }
+ }
/* no index */
if (!chunk_index && !chunk_length) {
if (cursor == osd_op->op.extent.offset) {
- OpContext *ctx = new OpContext(op, m->get_reqid(), &m->ops, this);
+ OpContext *ctx = new OpContext(op, m->get_reqid(), &m->ops, this);
ctx->reply = new MOSDOpReply(m, 0, get_osdmap_epoch(), 0, false);
- ctx->data_off = osd_op->op.extent.offset;
- ctx->ignore_log_op_stats = true;
- complete_read_ctx(0, ctx);
+ ctx->data_off = osd_op->op.extent.offset;
+ ctx->ignore_log_op_stats = true;
+ complete_read_ctx(0, ctx);
}
break;
}
}
/* the size to read -> | op length | */
/* | a chunk | */
- if (cursor + next_length > chunk_index + chunk_length) {
- next_length = chunk_index + chunk_length - cursor;
- }
+ if (cursor + next_length > chunk_index + chunk_length) {
+ next_length = chunk_index + chunk_length - cursor;
+ }
chunk_read[cursor] = {{chunk_index, next_length}};
cursor += next_length;
req_len = cursor - osd_op->op.extent.offset;
for (auto &p : chunk_read) {
auto chunks = p.second.begin();
- dout(20) << __func__ << " chunk_index: " << chunks->first
- << " next_length: " << chunks->second << " cursor: "
+ dout(20) << __func__ << " chunk_index: " << chunks->first
+ << " next_length: " << chunks->second << " cursor: "
<< p.first << dendl;
do_proxy_chunked_read(op, obc, i, chunks->first, p.first, chunks->second, req_len, write_ordered);
}
- }
+ }
}
struct RefCountCallback : public Context {
PrimaryLogPG::OpContext *ctx;
OSDOp& osd_op;
bool requeue = false;
-
+
RefCountCallback(PrimaryLogPG::OpContext *ctx, OSDOp &osd_op)
: ctx(ctx), osd_op(osd_op) {}
void finish(int r) override {
ObjectContextRef PrimaryLogPG::get_prev_clone_obc(ObjectContextRef obc)
{
- auto s = std::find(obc->ssc->snapset.clones.begin(), obc->ssc->snapset.clones.end(),
+ auto s = std::find(obc->ssc->snapset.clones.begin(), obc->ssc->snapset.clones.end(),
obc->obs.oi.soid.snap);
if (s != obc->ssc->snapset.clones.begin()) {
auto s_iter = s - 1;
// We *must* find the clone iff it's not head,
// let s == snapset.clones.end() mean head
- ceph_assert((s == snapset.clones.end()) == oi.soid.is_head());
+ ceph_assert((s == snapset.clones.end()) == oi.soid.is_head());
if (s != snapset.clones.begin()) {
_l = get_context(s - 1);
dec_refcount(soid, refs);
});
}
- } else if (oi.manifest.is_redirect() &&
+ } else if (oi.manifest.is_redirect() &&
oi.test_flag(object_info_t::FLAG_REDIRECT_HAS_REFERENCE)) {
ctx->register_on_commit(
[oi, this](){
refcount_manifest(oi.soid, oi.manifest.redirect_target,
refcount_t::DECREMENT_REF, NULL, std::nullopt);
});
- }
+ }
}
ceph_tid_t PrimaryLogPG::refcount_manifest(hobject_t src_soid, hobject_t tgt_soid, refcount_t type,
object_manifest_t *manifest = &obc->obs.oi.manifest;
if (!manifest->chunk_map.count(chunk_index)) {
return;
- }
+ }
uint64_t chunk_length = manifest->chunk_map[chunk_index].length;
hobject_t soid = manifest->chunk_map[chunk_index].oid;
hobject_t ori_soid = m->get_hobj();
if (write_ordered) {
flags |= CEPH_OSD_FLAG_RWORDERED;
}
-
+
if (!chunk_length || soid == hobject_t()) {
return;
}
CEPH_OSD_FLAG_ENFORCE_SNAPC |
CEPH_OSD_FLAG_MAP_SNAP_CLONE);
- dout(10) << __func__ << " Start do chunk proxy read for " << *m
- << " index: " << op_index << " oid: " << soid.oid.name << " req_offset: " << req_offset
+ dout(10) << __func__ << " Start do chunk proxy read for " << *m
+ << " index: " << op_index << " oid: " << soid.oid.name << " req_offset: " << req_offset
<< " req_length: " << req_length << dendl;
ProxyReadOpRef prdop(std::make_shared<ProxyReadOp>(op, ori_soid, m->ops));
osd_op.op.extent.offset = manifest->chunk_map[chunk_index].offset + req_offset - chunk_index;
} else {
ceph_abort_msg("chunk_index > req_offset");
- }
- osd_op.op.extent.length = req_length;
+ }
+ osd_op.op.extent.length = req_length;
ObjectOperation obj_op;
obj_op.dup(pobj_op->ops);
osd_op = &m->ops[i];
ceph_osd_op op = osd_op->op;
switch (op.op) {
- case CEPH_OSD_OP_READ:
+ case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_SYNC_READ: {
uint64_t cursor = osd_op->op.extent.offset;
uint64_t remain = osd_op->op.extent.length;
}
if (p.second.length >= remain) {
remain = 0;
- break;
+ break;
} else {
remain = remain - p.second.length;
}
cursor += p.second.length;
}
}
-
+
if (remain) {
dout(20) << __func__ << " requested chunks don't exist in chunk_map " << dendl;
return false;
ctx->mtime = m->get_mtime();
dout(10) << __func__ << " " << soid << " " << *ctx->ops
- << " ov " << obc->obs.oi.version << " av " << ctx->at_version
+ << " ov " << obc->obs.oi.version << " av " << ctx->at_version
<< " snapc " << ctx->snapc
<< " snapset " << obc->ssc->snapset
- << dendl;
+ << dendl;
} else {
dout(10) << __func__ << " " << soid << " " << *ctx->ops
<< " ov " << obc->obs.oi.version
- << dendl;
+ << dendl;
}
if (!ctx->user_at_version)
ctx->at_version = get_next_version();
PGTransaction *t = ctx->op_t.get();
-
+
if (new_snaps.empty()) {
// remove clone
dout(10) << coid << " snaps " << old_snaps << " -> "
// ...from snapset
ceph_assert(p != snapset.clones.end());
-
+
snapid_t last = coid.snap;
ctx->delta_stats.num_bytes -= snapset.get_clone_bytes(last);
snapset.clone_overlap.erase(last);
snapset.clone_size.erase(last);
snapset.clone_snaps.erase(last);
-
+
ctx->log.push_back(
pg_log_entry_t(
pg_log_entry_t::DELETE,
}
switch (op.op) {
-
+
// --- READS ---
case CEPH_OSD_OP_CMPEXT:
result = getattrs_maybe_cache(
ctx->obc,
&out);
-
+
bufferlist bl;
encode(out, bl);
ctx->delta_stats.num_rd_kb += shift_round_up(bl.length(), 10);
osd_op.outdata.claim_append(bl);
}
break;
-
+
case CEPH_OSD_OP_CMPXATTR:
++ctx->num_read;
{
tracepoint(osd, do_osd_op_pre_cmpxattr, soid.oid.name.c_str(), soid.snap.val, aname.c_str());
string name = "_" + aname;
name[op.xattr.name_len + 1] = 0;
-
+
bufferlist xattr;
result = getattr_maybe_cache(
ctx->obc,
&xattr);
if (result < 0 && result != -EEXIST && result != -ENODATA)
break;
-
+
ctx->delta_stats.num_rd++;
ctx->delta_stats.num_rd_kb += shift_round_up(xattr.length(), 10);
}
if (result < 0) {
break;
- }
+ }
if (!ctx->obc->obs.oi.is_whiteout()) {
ceph_assert(obs.exists);
clone_info ci;
dout(10) << "clean_regions modified" << ctx->clean_regions << dendl;
}
break;
-
+
case CEPH_OSD_OP_WRITEFULL:
++ctx->num_write;
result = 0;
static_cast<Option::size_t>(osd->osd_max_object_size), get_dpp());
if (result < 0)
break;
-
+
ceph_assert(op.extent.length);
if (obs.exists && !oi.is_whiteout()) {
t->zero(soid, op.extent.offset, op.extent.length);
oi.clear_data_digest();
}
break;
-
+
case CEPH_OSD_OP_DELETE:
++ctx->num_write;
result = 0;
result = -EINVAL;
goto fail;
}
-
+
if (!src_length) {
result = -EINVAL;
goto fail;
result = -EINPROGRESS;
break;
}
- }
+ }
if (op_finisher) {
result = op_finisher->execute();
ceph_assert(result == 0);
}
oi.manifest.chunk_map[src_offset] = chunk_info;
- if (!oi.has_manifest() && !oi.manifest.is_chunked())
+ if (!oi.has_manifest() && !oi.manifest.is_chunked())
ctx->delta_stats.num_objects_manifest++;
oi.set_flag(object_info_t::FLAG_MANIFEST);
oi.manifest.type = object_manifest_t::TYPE_CHUNKED;
ctx->modify = true;
ctx->cache_operation = true;
- dout(10) << "set-chunked oid:" << oi.soid << " user_version: " << oi.user_version
+ dout(10) << "set-chunked oid:" << oi.soid << " user_version: " << oi.user_version
<< " chunk_info: " << chunk_info << dendl;
if (op_finisher) {
ctx->op_finishers.erase(ctx->current_osd_subop_num);
result = -EINVAL;
break;
}
-
+
// The chunks already has a reference, so it is just enough to invoke truncate if necessary
uint64_t chunk_length = 0;
for (auto p : obs.oi.manifest.chunk_map) {
break;
// -- object attrs --
-
+
case CEPH_OSD_OP_SETXATTR:
++ctx->num_write;
result = 0;
ctx->delta_stats.num_wr++;
}
break;
-
+
// -- fancy writers --
case CEPH_OSD_OP_APPEND:
goto fail;
}
tracepoint(osd, do_osd_op_pre_omap_cmp, soid.oid.name.c_str(), soid.snap.val, list_keys(assertions).c_str());
-
+
map<string, bufferlist> out;
if (oi.is_omap()) {
{
ObjectContextRef promote_obc;
cache_result_t tier_mode_result;
- if (obs.exists && obs.oi.has_manifest()) {
- tier_mode_result =
+ if (obs.exists && obs.oi.has_manifest()) {
+ tier_mode_result =
maybe_handle_manifest_detail(
ctx->op,
true,
rollback_to);
} else {
- tier_mode_result =
+ tier_mode_result =
maybe_handle_cache_detail(
ctx->op,
true,
ceph_assert(soid.snap == CEPH_NOSNAP);
dout(20) << "make_writeable " << soid << " snapset=" << ctx->new_snapset
<< " snapc=" << snapc << dendl;
-
+
bool was_dirty = ctx->obc->obs.oi.is_dirty();
if (ctx->new_obs.exists) {
// we will mark the object dirty
// clone
hobject_t coid = soid;
coid.snap = snapc.seq;
-
+
unsigned l;
for (l = 1;
l < snapc.snaps.size() && snapc.snaps[l] > ctx->new_snapset.seq;
vector<snapid_t> snaps(l);
for (unsigned i=0; i<l; i++)
snaps[i] = snapc.snaps[i];
-
+
// prepare clone
object_info_t static_snap_oi(coid);
object_info_t *snap_oi;
ctx->clone_obc->attr_cache = ctx->obc->attr_cache;
snap_oi = &ctx->clone_obc->obs.oi;
if (ctx->obc->obs.oi.has_manifest()) {
- if ((ctx->obc->obs.oi.flags & object_info_t::FLAG_REDIRECT_HAS_REFERENCE) &&
+ if ((ctx->obc->obs.oi.flags & object_info_t::FLAG_REDIRECT_HAS_REFERENCE) &&
ctx->obc->obs.oi.manifest.is_redirect()) {
snap_oi->set_flag(object_info_t::FLAG_MANIFEST);
snap_oi->manifest.type = object_manifest_t::TYPE_REDIRECT;
snap_oi->copy_user_bits(ctx->obs->oi);
_make_clone(ctx, ctx->op_t.get(), ctx->clone_obc, soid, coid, snap_oi);
-
+
ctx->delta_stats.num_objects++;
if (snap_oi->is_dirty()) {
ctx->delta_stats.num_objects_dirty++;
ctx->new_snapset.clone_size[coid.snap] = ctx->obs->oi.size;
ctx->new_snapset.clone_snaps[coid.snap] = snaps;
- // clone_overlap should contain an entry for each clone
+ // clone_overlap should contain an entry for each clone
// (an empty interval_set if there is no overlap)
ctx->new_snapset.clone_overlap[coid.snap];
if (ctx->obs->oi.size)
ctx->new_snapset.clone_overlap[coid.snap].insert(0, ctx->obs->oi.size);
-
+
// log clone
dout(10) << " cloning v " << ctx->obs->oi.version
<< " to " << coid << " v " << ctx->at_version
}
newest_overlap.subtract(ctx->modified_ranges);
}
-
+
if (snapc.seq > ctx->new_snapset.seq) {
// update snapset with latest snap context
ctx->new_snapset.seq = snapc.seq;
log_op_type != pg_log_entry_t::PROMOTE) {
dec_refcount_by_dirty(ctx);
}
-
+
// finish and log the op.
if (ctx->user_modify) {
// update the user_version for any modify ops, except for the watch op
ctx->new_obs.oi.user_version = ctx->user_at_version;
}
ctx->bytes_written = ctx->op_t->get_bytes_written();
-
+
if (ctx->new_obs.exists) {
ctx->new_obs.oi.version = ctx->at_version;
ctx->new_obs.oi.prior_version = ctx->obs->oi.version;
auto m = ctx->op->get_req<MOSDOp>();
ceph_assert(ctx->async_reads_complete());
- for (vector<OSDOp>::iterator p = ctx->ops->begin();
+ for (auto p = ctx->ops->begin();
p != ctx->ops->end() && result >= 0; ++p) {
if (p->rval < 0 && !(p->op.flags & CEPH_OSD_OP_FLAG_FAILOK)) {
result = p->rval;
C_CopyChunk(PrimaryLogPG *p, hobject_t o, epoch_t lpr,
const PrimaryLogPG::CopyOpRef& c)
: pg(p), oid(o), last_peering_reset(lpr),
- tid(0), cop(c)
+ tid(0), cop(c)
{}
void finish(int r) override {
if (r == -ECANCELED)
int num_chunks = 0;
uint64_t last_offset = 0, chunks_size = 0;
object_manifest_t *manifest = &obc->obs.oi.manifest;
- map<uint64_t, chunk_info_t>::iterator iter = manifest->chunk_map.find(start_offset);
+ map<uint64_t, chunk_info_t>::iterator iter = manifest->chunk_map.find(start_offset);
for (;iter != manifest->chunk_map.end(); ++iter) {
num_chunks++;
chunks_size += iter->second.length;
cop->start_offset = start_offset;
cop->last_offset = last_offset;
dout(20) << __func__ << " oid " << obc->obs.oi.soid << " num_chunks: " << num_chunks
- << " start_offset: " << start_offset << " chunks_size: " << chunks_size
+ << " start_offset: " << start_offset << " chunks_size: " << chunks_size
<< " last_offset: " << last_offset << dendl;
iter = manifest->chunk_map.find(start_offset);
fin->tid = tid;
sub_cop->objecter_tid = tid;
- dout(20) << __func__ << " tgt_oid: " << soid.oid << " tgt_offset: "
+ dout(20) << __func__ << " tgt_oid: " << soid.oid << " tgt_offset: "
<< manifest->chunk_map[iter->first].offset
- << " length: " << length << " pool id: " << oloc.pool
+ << " length: " << length << " pool id: " << oloc.pool
<< " tid: " << tid << dendl;
if (last_offset < iter->first) {
if (r < 0) {
obj_cop->failed = true;
goto out;
- }
+ }
if (obj_cop->failed) {
return;
- }
+ }
if (!chunk_data.outdata.length()) {
r = -EIO;
obj_cop->failed = true;
if (!ctx->lock_manager.take_write_lock(
obj_cop->obc->obs.oi.soid,
obj_cop->obc)) {
- // recovery op can take read lock.
- // so need to wait for recovery completion
+ // recovery op can take read lock.
+ // so need to wait for recovery completion
r = -EAGAIN;
obj_cop->failed = true;
close_op_ctx(ctx.release());
sub_chunk.outdata.length(),
sub_chunk.outdata,
p.second->dest_obj_fadvise_flags);
- dout(20) << __func__ << " offset: " << p.second->cursor.data_offset
+ dout(20) << __func__ << " offset: " << p.second->cursor.data_offset
<< " length: " << sub_chunk.outdata.length() << dendl;
write_update_size_and_usage(ctx->delta_stats, obs.oi, ctx->modified_ranges,
p.second->cursor.data_offset, sub_chunk.outdata.length());
- obs.oi.manifest.chunk_map[p.second->cursor.data_offset].clear_flag(chunk_info_t::FLAG_MISSING);
+ obs.oi.manifest.chunk_map[p.second->cursor.data_offset].clear_flag(chunk_info_t::FLAG_MISSING);
ctx->clean_regions.mark_data_region_dirty(p.second->cursor.data_offset, sub_chunk.outdata.length());
sub_chunk.outdata.clear();
}
obs.oi.clear_data_digest();
- ctx->at_version = get_next_version();
+ ctx->at_version = get_next_version();
finish_ctx(ctx.get(), pg_log_entry_t::PROMOTE);
simple_opc_submit(std::move(ctx));
}
return;
}
-
+
osd->promote_finish(results->object_size);
osd->logger->inc(l_osd_tier_promote);
ctx->new_obs.oi.clear_flag(object_info_t::FLAG_OMAP);
ctx->clean_regions.mark_omap_dirty();
}
- if (obc->obs.oi.size == chunks_size) {
+ if (obc->obs.oi.size == chunks_size) {
t->truncate(oid, 0);
interval_set<uint64_t> trim;
trim.insert(0, ctx->new_obs.oi.size);
}
} else {
for (auto &p : ctx->new_obs.oi.manifest.chunk_map) {
- dout(20) << __func__ << " offset: " << p.second.offset
+ dout(20) << __func__ << " offset: " << p.second.offset
<< " length: " << p.second.length << dendl;
p.second.clear_flag(chunk_info_t::FLAG_MISSING); // CLEAN
}
for (auto &p : fop->io_tids) {
tids->push_back(p.second);
p.second = 0;
- }
+ }
}
if (fop->blocking && fop->obc->is_blocked()) {
fop->obc->stop_block();
dout(10) << " removing " << *repop << dendl;
ceph_assert(!repop_queue.empty());
- dout(20) << " q front is " << *repop_queue.front() << dendl;
+ dout(20) << " q front is " << *repop_queue.front() << dendl;
if (repop_queue.front() == repop) {
RepGather *to_remove = nullptr;
while (!repop_queue.empty() &&
dout(10) << __func__ << ": " << *repop << dendl;
return boost::intrusive_ptr<RepGather>(repop);
}
-
+
void PrimaryLogPG::remove_repop(RepGather *repop)
{
dout(20) << __func__ << " " << *repop << dendl;
{
ObjectContextRef obc(object_contexts.lookup_or_create(oi.soid));
ceph_assert(obc->destructor_callback == NULL);
- obc->destructor_callback = new C_PG_ObjectContext(this, obc.get());
+ obc->destructor_callback = new C_PG_ObjectContext(this, obc.get());
obc->obs.oi = oi;
obc->obs.exists = false;
obc->ssc = ssc;
dout(10) << __func__ << " " << oid << " @" << oid.snap
<< " snapset " << ssc->snapset << dendl;
-
+
// head?
if (oid.snap > ssc->snapset.seq) {
ObjectContextRef obc = get_object_context(head, false);
{
if (recovery_state.get_missing_loc().is_unfound(soid)) {
dout(7) << __func__ << " " << soid
- << " v " << v
+ << " v " << v
<< " but it is unfound" << dendl;
return PULL_NONE;
}
apply_and_flush_repops(false);
cancel_log_updates();
// we must remove PGRefs, so do this this prior to release_backoffs() callers
- clear_backoffs();
+ clear_backoffs();
// clean up snap trim references
snap_trimmer_machine.process_event(Reset());
if (needs_recovery()) {
// this shouldn't happen!
// We already checked num_missing() so we must have missing replicas
- osd->clog->error() << info.pgid
+ osd->clog->error() << info.pgid
<< " Unexpected Error: recovery ending with missing replicas";
return false;
}
{
if (item.have == latest->reverting_to) {
ObjectContextRef obc = get_object_context(soid, true);
-
+
if (obc->obs.oi.version == latest->version) {
// I'm already reverting
dout(10) << " already reverting " << soid << dendl;
break;
}
}
-
+
if (!recovering.count(soid)) {
if (recovering.count(head)) {
++skipped;
break;
}
}
-
+
// only advance last_requested if we haven't skipped anything
if (!skipped)
recovery_state.set_last_requested(v);
}
-
+
pgbackend->run_recovery_op(h, get_recovery_op_priority());
return started;
}
/** check_local
- *
+ *
* verifies that stray objects have been deleted
*/
void PrimaryLogPG::check_local()
/* NotTrimming */
PrimaryLogPG::NotTrimming::NotTrimming(my_context ctx)
- : my_base(ctx),
+ : my_base(ctx),
NamedState(nullptr, "NotTrimming")
{
context< SnapTrimmer >().log_enter(state_name);
{
return m_scrubber->write_blocked_by_scrub(oid);
}
+
void intrusive_ptr_add_ref(PrimaryLogPG *pg) { pg->get("intptr"); }
void intrusive_ptr_release(PrimaryLogPG *pg) { pg->put("intptr"); }