}
}
+ /* Clears weakrefs in the interval [from, to] -- note that to is inclusive */
+ /* Strong refs picked up under the lock are parked in `vals` so that they
+  * (and any cleanup their destruction triggers) are released only after the
+  * inner scope drops the lock. */
+ void clear_range(
+ const K& from,
+ const K& to) {
+ list<VPtr> vals; // release any refs we have after we drop the lock
+ {
+ std::lock_guard l{lock};
+ // lower_bound/upper_bound bracket the key range; using upper_bound for the
+ // right edge is what makes the `to` endpoint inclusive.
+ auto from_iter = weak_refs.lower_bound(from);
+ auto to_iter = weak_refs.upper_bound(to);
+ for (auto i = from_iter; i != to_iter; ) {
+ // lock() presumably yields an empty VPtr for already-expired entries;
+ // pushing an empty ref into vals is harmless.
+ vals.push_back(i->second.first.lock());
+ // NOTE(review): i is advanced *before* lru_remove executes, which keeps the
+ // loop iterator valid if the removal indirectly erases the current
+ // weak_refs node -- confirm against lru_remove's implementation.
+ lru_remove((i++)->first);
+ }
+ // NOTE(review): unlike purge() below, the weak_refs entries themselves are
+ // not erased here -- presumably they are reaped when the last strong ref
+ // goes away; verify that invariant holds for callers of clear_range.
+ }
+ }
+
+
void purge(const K &key) {
VPtr val; // release any ref we have after we drop the lock
{
osd->queue_recovery_context(this, c);
}
+/* Replica-side cleanup for a repop: for every object named in the incoming
+ * log entries, drop any cached object contexts covering that object and all
+ * of its clones, and assert no snapset context remains cached for the head.
+ * `t` is accepted to match the declared interface but is not used here. */
+void PrimaryLogPG::replica_clear_repop_obc(
+  const vector<pg_log_entry_t> &logv,
+  ObjectStore::Transaction &t)
+{
+  for (const auto &entry : logv) {
+    /* All clones share a snapset, so every obc in the object's clone
+     * range has to be blasted together */
+    object_contexts.clear_range(
+      entry.soid.get_object_boundary(),
+      entry.soid.get_head());
+    ceph_assert(
+      snapset_contexts.find(entry.soid.get_head()) ==
+      snapset_contexts.end());
+  }
+}
+
bool PrimaryLogPG::should_send_op(
pg_shard_t peer,
const hobject_t &hoid) {
projected_log.skip_can_rollback_to_to_head();
projected_log.trim(cct, last->version, nullptr, nullptr, nullptr);
}
+ if (!is_primary() && !is_ec_pg()) {
+ replica_clear_repop_obc(logv, t);
+ }
recovery_state.append_log(
logv, trim_to, roll_forward_to, min_last_complete_ondisk,
t, transaction_applied, async);
}
+ /* Replica-side: invalidate cached object contexts for the objects named in
+  * logv (implementation in PrimaryLogPG::replica_clear_repop_obc; t is
+  * currently unused by that implementation). */
+ void replica_clear_repop_obc(
+ const vector<pg_log_entry_t> &logv,
+ ObjectStore::Transaction &t);
+
void op_applied(const eversion_t &applied_version) override;
bool should_send_op(