p != info.hit_set.history.end();
++p)
ls.push_back(make_pair(p->begin, p->end));
- if (info.hit_set.current_info.begin)
- ls.push_back(make_pair(info.hit_set.current_info.begin, utime_t()));
- else if (hit_set)
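+ // the still-open in-memory hit set is reported with an unset end stamp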
+ if (hit_set)
ls.push_back(make_pair(hit_set_start_stamp, utime_t()));
::encode(ls, osd_op.outdata);
}
case CEPH_OSD_OP_PG_HITSET_GET:
{
utime_t stamp(osd_op.op.hit_set_get.stamp);
- if ((info.hit_set.current_info.begin &&
- stamp >= info.hit_set.current_info.begin) ||
- stamp >= hit_set_start_stamp) {
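+ // with current_info gone, only hit_set_start_stamp decides whether the
+ // in-memory hit set covers the requested stamp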
+ if (hit_set_start_stamp && stamp >= hit_set_start_stamp) {
// read the current in-memory HitSet, not the version we've
// checkpointed.
if (!hit_set) {
return;
}
- utime_t start = info.hit_set.current_info.begin;
- if (!start)
- start = hit_set_start_stamp;
- oid = get_hit_set_archive_object(start, now, pool.info.use_gmt_hitset);
- // If the current object is degraded we skip this persist request
- if (scrubber.write_blocked_by_scrub(oid, get_sort_bitwise()))
- return;
-
// If backfill is in progress and we could possibly overlap with the
// hit_set_* objects, back off. Since these all have
// hobject_t::hash set to pgid.ps(), and those sort first, we can
}
}
- if (!info.hit_set.current_info.begin)
- info.hit_set.current_info.begin = hit_set_start_stamp;
+
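+ // describe the interval being archived in a fresh pg_hit_set_info_t,
+ // rather than in the removed info.hit_set.current_info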
+ pg_hit_set_info_t new_hset = pg_hit_set_info_t(pool.info.use_gmt_hitset);
+ new_hset.begin = hit_set_start_stamp;
+ new_hset.end = now;
+ oid = get_hit_set_archive_object(
+ new_hset.begin,
+ new_hset.end,
+ new_hset.using_gmt);
+
+ // If the current object is degraded we skip this persist request
+ if (scrubber.write_blocked_by_scrub(oid, get_sort_bitwise()))
+ return;
hit_set->seal();
::encode(*hit_set, bl);
- info.hit_set.current_info.end = now;
dout(20) << __func__ << " archive " << oid << dendl;
if (agent_state) {
- agent_state->add_hit_set(info.hit_set.current_info.begin, hit_set);
+ agent_state->add_hit_set(new_hset.begin, hit_set);
uint32_t size = agent_state->hit_set_map.size();
if (size >= pool.info.hit_set_count) {
size = pool.info.hit_set_count > 0 ? pool.info.hit_set_count - 1: 0;
}
// hold a ref until it is flushed to disk
- hit_set_flushing[info.hit_set.current_info.begin] = hit_set;
- flush_time = info.hit_set.current_info.begin;
+ hit_set_flushing[new_hset.begin] = hit_set;
+ flush_time = new_hset.begin;
ObjectContextRef obc = get_object_context(oid, true);
repop = simple_repop_create(obc);
ctx->updated_hset_history = info.hit_set;
pg_hit_set_history_t &updated_hit_set_hist = *(ctx->updated_hset_history);
-
updated_hit_set_hist.current_last_update = info.last_update;
- updated_hit_set_hist.current_info.version = ctx->at_version;
+ new_hset.version = ctx->at_version;
- updated_hit_set_hist.history.push_back(updated_hit_set_hist.current_info);
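+ // the completed interval is appended directly to the history list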
+ updated_hit_set_hist.history.push_back(new_hset);
hit_set_create();
- updated_hit_set_hist.current_info = pg_hit_set_info_t(pool.info.use_gmt_hitset);
// fabricate an object_info_t and SnapSet
obc->obs.oi.version = ctx->at_version;
utime_t dummy_stamp;
::encode(dummy_stamp, bl);
}
- ::encode(current_info, bl);
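+ // keep the encoded layout unchanged: a default pg_hit_set_info_t is
+ // written where current_info used to be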
+ {
+ pg_hit_set_info_t dummy_info;
+ ::encode(dummy_info, bl);
+ }
::encode(history, bl);
ENCODE_FINISH(bl);
}
utime_t dummy_stamp;
::decode(dummy_stamp, p);
}
- ::decode(current_info, p);
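+ // consume the placeholder pg_hit_set_info_t encoded where current_info
+ // used to be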
+ {
+ pg_hit_set_info_t dummy_info;
+ ::decode(dummy_info, p);
+ }
::decode(history, p);
DECODE_FINISH(p);
}
void pg_hit_set_history_t::dump(Formatter *f) const
{
f->dump_stream("current_last_update") << current_last_update;
- f->open_object_section("current_info");
- current_info.dump(f);
- f->close_section();
f->open_array_section("history");
for (list<pg_hit_set_info_t>::const_iterator p = history.begin();
p != history.end(); ++p) {
ls.push_back(new pg_hit_set_history_t);
ls.push_back(new pg_hit_set_history_t);
ls.back()->current_last_update = eversion_t(1, 2);
- ls.back()->current_info.begin = utime_t(2, 4);
- ls.back()->current_info.end = utime_t(62, 24);
- ls.back()->history.push_back(ls.back()->current_info);
ls.back()->history.push_back(pg_hit_set_info_t());
}