}
-__s64 Journaler::append_entry(bufferlist& bl, Context *onsync)
+// Append one length-prefixed entry to the in-memory write buffer and
+// advance write_pos.  NOTE(review): the onsync auto-flush path is removed
+// by this patch — callers must now invoke flush()/wait_for_flush()
+// explicitly to get the entry onto disk.
+__s64 Journaler::append_entry(bufferlist& bl)
{
uint32_t s = bl.length();
write_buf.claim_append(bl);
+// write_pos accounts for the sizeof(uint32_t) length header plus payload.
write_pos += sizeof(s) + s;
- // flush now?
- if (onsync)
- flush(onsync);
-
+// Returns the new (post-append) logical write position.
return write_pos;
}
void Journaler::wait_for_flush(Context *onsync, Context *onsafe, bool add_ack_barrier)
{
+ // In journaler_safe mode, promote the sync waiter to a safe waiter so
+ // the caller is only completed once the write is safe, not merely
+ // flushed/acked.  Asserts the caller did not pass both callbacks.
+ if (g_conf.journaler_safe && onsync) {
+ assert(!onsafe);
+ onsafe = onsync;
+ onsync = 0;
+ }
+
// all flushed and acked?
if (write_pos == ack_pos) {
assert(write_buf.length() == 0);
__s64 get_trimmed_pos() const { return trimmed_pos; }
// write
- __s64 append_entry(bufferlist& bl, Context *onsync = 0);
+ // append_entry() only buffers the entry and returns the new write_pos;
+ // pair it with flush()/wait_for_flush() to make the entry durable.
+ __s64 append_entry(bufferlist& bl);
void wait_for_flush(Context *onsync = 0, Context *onsafe = 0, bool add_ack_barrier=false);
void flush(Context *onsync = 0, Context *onsafe = 0, bool add_ack_barrier=false);