// Unlock during expensive MANIFEST log write. New writes cannot get here
// because &w is ensuring that all new writes get queued.
{
+ // calculate the amount of data being compacted at every level
+ std::vector<uint64_t> size_being_compacted(NumberLevels()-1);
+ SizeBeingCompacted(size_being_compacted);
+
mu->Unlock();
- // The calles to Finalize and UpdateFilesBySize are cpu-heavy
+ // The calls to Finalize and UpdateFilesBySize are cpu-heavy
// and is best called outside the mutex.
- Finalize(v);
+ Finalize(v, size_being_compacted);
UpdateFilesBySize(v);
// Write new record to MANIFEST log
if (s.ok()) {
Version* v = new Version(this, current_version_number_++);
builder.SaveTo(v);
+
// Install recovered version
- Finalize(v);
+ std::vector<uint64_t> size_being_compacted(NumberLevels()-1);
+ SizeBeingCompacted(size_being_compacted);
+ Finalize(v, size_being_compacted);
+
v->offset_manifest_file_ = manifest_file_size;
AppendVersion(v);
manifest_file_number_ = next_file;
if (s.ok()) {
Version* v = new Version(this, 0);
builder.SaveTo(v);
+
// Install recovered version
- Finalize(v);
+ std::vector<uint64_t> size_being_compacted(NumberLevels()-1);
+ SizeBeingCompacted(size_being_compacted);
+ Finalize(v, size_being_compacted);
+
AppendVersion(v);
manifest_file_number_ = next_file;
next_file_number_ = next_file + 1;
}
}
-void VersionSet::Finalize(Version* v) {
+void VersionSet::Finalize(Version* v,
+ std::vector<uint64_t>& size_being_compacted) {
double max_score = 0;
int max_score_level = 0;
} else {
// Compute the ratio of current size to size limit.
const uint64_t level_bytes = TotalFileSize(v->files_[level]) -
- SizeBeingCompacted(level);
+ size_being_compacted[level];
score = static_cast<double>(level_bytes) / MaxBytesForLevel(level);
if (score > 1) {
// Log(options_->info_log, "XXX score l%d = %d ", level, (int)score);
}
// The total size of files that are currently being compacted
-uint64_t VersionSet::SizeBeingCompacted(int level) {
- uint64_t total = 0;
- for (std::set<Compaction*>::iterator it =
- compactions_in_progress_[level].begin();
- it != compactions_in_progress_[level].end();
- ++it) {
- Compaction* c = (*it);
- assert(c->level() == level);
- for (int i = 0; i < c->num_input_files(0); i++) {
- total += c->input(0,i)->file_size;
+// at every level up to the penultimate level.
+void VersionSet::SizeBeingCompacted(std::vector<uint64_t>& sizes) {
+ for (int level = 0; level < NumberLevels()-1; level++) {
+ uint64_t total = 0;
+ for (std::set<Compaction*>::iterator it =
+ compactions_in_progress_[level].begin();
+ it != compactions_in_progress_[level].end();
+ ++it) {
+ Compaction* c = (*it);
+ assert(c->level() == level);
+ for (int i = 0; i < c->num_input_files(0); i++) {
+ total += c->input(0,i)->file_size;
+ }
}
+ sizes[level] = total;
}
- return total;
}
Compaction* VersionSet::PickCompactionBySize(int level, double score) {
// compute the compactions needed. It is better to do it here
// and also in LogAndApply(), otherwise the values could be stale.
- Finalize(current_);
+ std::vector<uint64_t> size_being_compacted(NumberLevels()-1);
+ current_->vset_->SizeBeingCompacted(size_being_compacted);
+ Finalize(current_, size_being_compacted);
// We prefer compactions triggered by too much data in a level over
// the compactions triggered by seeks.
void Init(int num_levels);
- void Finalize(Version* v);
+ void Finalize(Version* v, std::vector<uint64_t>&);
void GetRange(const std::vector<FileMetaData*>& inputs,
InternalKey* smallest,
void operator=(const VersionSet&);
// Return the total amount of data that is undergoing
- // compactions at this level
- uint64_t SizeBeingCompacted(int level);
+ // compactions per level
+ void SizeBeingCompacted(std::vector<uint64_t>&);
// Returns true if any one of the parent files are being compacted
bool ParentRangeInCompaction(const InternalKey* smallest,