"Use adaptive mutex");
DEFINE_uint64(bytes_per_sync, rocksdb::Options().bytes_per_sync,
- "Allows OS to incrementally sync files to disk while they are"
+ "Allows OS to incrementally sync SST files to disk while they are"
" being written, in the background. Issue one request for every"
" bytes_per_sync written. 0 turns it off.");
+
+DEFINE_uint64(wal_bytes_per_sync, rocksdb::Options().wal_bytes_per_sync,
+ "Allows OS to incrementally sync WAL files to disk while they are"
+ " being written, in the background. Issue one request for every"
+ " wal_bytes_per_sync written. 0 turns it off.");
+
DEFINE_bool(filter_deletes, false, " On true, deletes use bloom-filter and drop"
" the delete if key not present");
options.access_hint_on_compaction_start = FLAGS_compaction_fadvice_e;
options.use_adaptive_mutex = FLAGS_use_adaptive_mutex;
options.bytes_per_sync = FLAGS_bytes_per_sync;
+ options.wal_bytes_per_sync = FLAGS_wal_bytes_per_sync;
// merge operator options
options.merge_operator = MergeOperators::CreateFromStringId(
uint64_t wal_synced = db_stats_[InternalStats::WAL_FILE_SYNCED];
uint64_t write_with_wal = db_stats_[InternalStats::WRITE_WITH_WAL];
uint64_t write_stall_micros = db_stats_[InternalStats::WRITE_STALL_MICROS];
+ uint64_t compact_bytes_read = 0;
+ uint64_t compact_bytes_write = 0;
+ uint64_t compact_micros = 0;
+
const int kHumanMicrosLen = 32;
char human_micros[kHumanMicrosLen];
write_with_wal / static_cast<double>(wal_synced + 1),
wal_bytes / kGB, wal_bytes / kMB / seconds_up);
value->append(buf);
+ // Compaction
+ for (int level = 0; level < number_levels_; level++) {
+ compact_bytes_read += comp_stats_[level].bytes_readnp1 +
+ comp_stats_[level].bytes_readn;
+ compact_bytes_write += comp_stats_[level].bytes_written;
+ compact_micros += comp_stats_[level].micros;
+ }
+ snprintf(buf, sizeof(buf),
+ "Cumulative compaction: %.2f GB write, %.2f MB/s write, "
+ "%.2f GB read, %.2f MB/s read, %.1f seconds\n",
+ compact_bytes_write / kGB,
+ compact_bytes_write / kMB / seconds_up,
+ compact_bytes_read / kGB,
+ compact_bytes_read / kMB / seconds_up,
+ compact_micros / 1000000.0);
+ value->append(buf);
// Stall
AppendHumanMicros(write_stall_micros, human_micros, kHumanMicrosLen, true);
snprintf(buf, sizeof(buf),
interval_wal_bytes / kMB / std::max(interval_seconds_up, 0.001));
value->append(buf);
+ // Compaction
+ uint64_t interval_compact_bytes_write =
+ compact_bytes_write - db_stats_snapshot_.compact_bytes_write;
+ uint64_t interval_compact_bytes_read =
+ compact_bytes_read - db_stats_snapshot_.compact_bytes_read;
+ uint64_t interval_compact_micros =
+ compact_micros - db_stats_snapshot_.compact_micros;
+
+ snprintf(buf, sizeof(buf),
+ "Interval compaction: %.2f GB write, %.2f MB/s write, "
+ "%.2f GB read, %.2f MB/s read, %.1f seconds\n",
+ interval_compact_bytes_write / kGB,
+ interval_compact_bytes_write / kMB /
+ std::max(interval_seconds_up, 0.001),
+ interval_compact_bytes_read / kGB,
+ interval_compact_bytes_read / kMB /
+ std::max(interval_seconds_up, 0.001),
+ interval_compact_micros / 1000000.0);
+ value->append(buf);
+
// Stall
AppendHumanMicros(
write_stall_micros - db_stats_snapshot_.write_stall_micros,
db_stats_snapshot_.wal_synced = wal_synced;
db_stats_snapshot_.write_with_wal = write_with_wal;
db_stats_snapshot_.write_stall_micros = write_stall_micros;
+ db_stats_snapshot_.compact_bytes_write = compact_bytes_write;
+ db_stats_snapshot_.compact_bytes_read = compact_bytes_read;
+ db_stats_snapshot_.compact_micros = compact_micros;
}
void InternalStats::DumpCFStats(std::string* value) {