bucket_found = true;
break;
} else {
- if (user_key.compare(ExtractUserKey(buckets_[hash_val].key)) == 0) {
+ if (user_key.compare(
+ is_last_level_file_ ? Slice(buckets_[hash_val].key)
+ : ExtractUserKey(Slice(buckets_[hash_val].key))) == 0) {
status_ = Status::Corruption("Same key is being inserted again.");
return;
}
hash_vals.push_back(hash_val);
}
}
- buckets_[bucket_id].key = key;
- buckets_[bucket_id].value = value;
+ if (is_last_level_file_) {
+ buckets_[bucket_id].key.assign(user_key.data(), user_key.size());
+ } else {
+ buckets_[bucket_id].key.assign(key.data(), key.size());
+ }
+ buckets_[bucket_id].value.assign(value.data(), value.size());
buckets_[bucket_id].is_empty = false;
properties_.num_entries++;
if (prev_key_.empty()) {
return Status::Corruption("Unable to find unused key");
}
+ // Find the smallest user key strictly greater than prev_key_ by
+ // incrementing its last byte and propagating carry-overs leftward.
std::string new_user_key = prev_key_;
- new_user_key.back()++;
- // We ignore carry-overs and check that it is larger than previous key.
- if (new_user_key > prev_key_) {
- unused_user_key_ = new_user_key;
- } else {
+ int curr_pos = new_user_key.size() - 1;
+ while (curr_pos >= 0) {
+ ++new_user_key[curr_pos];
+ if (new_user_key > prev_key_) {
+ unused_user_key_ = new_user_key;
+ break;
+ }
+ --curr_pos;
+ }
+ if (curr_pos < 0) {
return Status::Corruption("Unable to find unused key");
}
}
s = file_->Append(Slice(unused_bucket));
} else {
++num_added;
- if (is_last_level_file_) {
- Slice user_key = ExtractUserKey(bucket.key);
- s = file_->Append(user_key);
- if (s.ok()) {
- s = file_->Append(bucket.value);
- }
- } else {
- s = file_->Append(bucket.key);
- if (s.ok()) {
- s = file_->Append(bucket.value);
- }
+ s = file_->Append(Slice(bucket.key));
+ if (s.ok()) {
+ s = file_->Append(Slice(bucket.value));
}
}
if (!s.ok()) {
CuckooBucket& curr_bucket = buckets_[curr_node.bucket_id];
for (uint32_t hash_cnt = 0; hash_cnt < num_hash_table_; ++hash_cnt) {
uint64_t child_bucket_id = GetSliceHash(
- ExtractUserKey(curr_bucket.key), hash_cnt, max_num_buckets_);
+ is_last_level_file_ ? curr_bucket.key
+ : ExtractUserKey(Slice(curr_bucket.key)),
+ hash_cnt, max_num_buckets_);
if (buckets_[child_bucket_id].make_space_for_key_call_id ==
make_space_for_key_call_id_) {
continue;
// Performance tests
namespace {
-void GenerateKeys(uint64_t num, std::vector<std::string>* keys,
- uint32_t user_key_length) {
- for (uint64_t i = 0; i < num; ++i) {
- std::string new_key(reinterpret_cast<char*>(&i), sizeof(i));
- new_key = std::string(user_key_length - new_key.size(), 'k') + new_key;
- ParsedInternalKey ikey(new_key, num, kTypeValue);
- std::string full_key;
- AppendInternalKey(&full_key, ikey);
- keys->push_back(full_key);
- }
-}
-
bool DoNothing(void* arg, const ParsedInternalKey& k, const Slice& v) {
// Deliberately empty.
return false;
void BM_CuckooRead(uint64_t num, uint32_t key_length,
uint32_t value_length, uint64_t num_reads, double hash_ratio) {
assert(value_length <= key_length);
+ assert(8 <= key_length);
std::vector<std::string> keys;
Options options;
options.allow_mmap_reads = true;
}
std::string fname = FLAGS_file_dir + "/cuckoo_read_benchmark";
- GenerateKeys(num, &keys, key_length);
uint64_t predicted_file_size =
num * (key_length + value_length) / hash_ratio + 1024;
unique_ptr<WritableFile> writable_file;
ASSERT_OK(env->NewWritableFile(fname, &writable_file, env_options));
CuckooTableBuilder builder(
- writable_file.get(), keys[0].size(), value_length, hash_ratio,
+ writable_file.get(), key_length + 8, value_length, hash_ratio,
predicted_file_size, kMaxNumHashTable, 1000, true, GetSliceMurmurHash);
ASSERT_OK(builder.status());
- for (uint32_t key_idx = 0; key_idx < num; ++key_idx) {
+ for (uint64_t key_idx = 0; key_idx < num; ++key_idx) {
// Value is just a part of key.
- builder.Add(Slice(keys[key_idx]), Slice(&keys[key_idx][0], value_length));
+ std::string new_key(reinterpret_cast<char*>(&key_idx), sizeof(key_idx));
+ new_key = std::string(key_length - new_key.size(), 'k') + new_key;
+ ParsedInternalKey ikey(new_key, num, kTypeValue);
+ std::string full_key;
+ AppendInternalKey(&full_key, ikey);
+ builder.Add(Slice(full_key), Slice(&full_key[0], value_length));
ASSERT_EQ(builder.NumEntries(), key_idx + 1);
ASSERT_OK(builder.status());
+ keys.push_back(full_key);
}
ASSERT_OK(builder.Finish());
ASSERT_EQ(num, builder.NumEntries());