encoding the updated one.
If this is not done, urgent data keeps getting appended
to the head, eventually overwriting queue data, which
results in invalid entries.
Signed-off-by: Pritha Srivastava <prsrivas@redhat.com>
bl.claim_append(bl_head);
+ if (bl.length() > head.max_head_size) {
+ CLS_LOG(0, "ERROR: queue_write_head: invalid head size = %u and urgent data size = %u \n", bl.length(), head.bl_urgent_data.length());
+ return -EINVAL;
+ }
+
int ret = cls_cxx_write2(hctx, 0, bl.length(), &bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
if (ret < 0) {
CLS_LOG(5, "ERROR: queue_write_head: failed to write head");
}
//Update urgent data map
+ head.bl_urgent_data.clear();
encode(urgent_data, head.bl_urgent_data);
+ CLS_LOG(5, "INFO: cls_rgw_gc_queue_remove_entries(): Urgent data size is %u\n", head.bl_urgent_data.length());
return queue_write_head(hctx, head);
}