range_del_agg_(range_del_agg),
compaction_(std::move(compaction)),
compaction_filter_(compaction_filter),
+#ifndef ROCKSDB_LITE
compaction_listener_(compaction_listener),
+#endif // ROCKSDB_LITE
shutting_down_(shutting_down),
ignore_snapshots_(false),
merge_out_iter_(merge_helper_) {
// NOTE(review): the stray leading '+' on the #ifndef/#endif lines below looks
// like unapplied unified-diff markers — this text will not compile as written;
// confirm against the original patch and strip the markers.
// NOTE(review): the members from `bottommost_level_` down appear to belong to a
// different class (file-writer state) than the compaction members above —
// verify this hunk was not mis-assembled from two files.
RangeDelAggregator* range_del_agg_;  // non-owning; presumably supplied by the caller — TODO confirm ownership
std::unique_ptr<CompactionProxy> compaction_;  // owned; moved in via the constructor (see initializer list)
const CompactionFilter* compaction_filter_;  // non-owning; may be null — TODO confirm
+#ifndef ROCKSDB_LITE
CompactionEventListener* compaction_listener_;  // non-owning; compiled out of LITE builds
+#endif // ROCKSDB_LITE
const std::atomic<bool>* shutting_down_;  // non-owning flag; presumably polled to abort work on DB shutdown — verify against caller
bool bottommost_level_;
bool valid_ = false;  // in-class initializer; the other members rely on the constructor's initializer list
// Actually written data size, usable for truncate();
// does not count padding data.
uint64_t filesize_;
+#ifndef ROCKSDB_LITE
// Needed for unbuffered (direct) access, where writes must happen
// at aligned offsets, so we may need to go back and rewrite a page.
uint64_t next_write_offset_;
+#endif // ROCKSDB_LITE
bool pending_sync_;
uint64_t last_sync_size_;
uint64_t bytes_per_sync_;  // initialized from options.bytes_per_sync (see initializer list below)
buf_(),
max_buffer_size_(options.writable_file_max_buffer_size),
filesize_(0),
+#ifndef ROCKSDB_LITE
next_write_offset_(0),
+#endif // ROCKSDB_LITE
pending_sync_(false),
last_sync_size_(0),
bytes_per_sync_(options.bytes_per_sync),