Summary: Makefile had options to ignore sign-comparison and unused-parameter warnings, which should not be there (the diff removes them). Also fixed the specific warnings this exposed in the codebase.
Test Plan: make
Reviewers: chip, dhruba
Reviewed By: dhruba
CC: leveldb
Differential Revision: https://reviews.facebook.net/D9531
# this file is generated by the previous line to set build flags and sources
include build_config.mk
-WARNING_FLAGS = -Wall -Werror -Wno-unused-parameter -Wno-sign-compare
+WARNING_FLAGS = -Wall -Werror
CFLAGS += -g $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CCFLAGS) $(OPT)
CXXFLAGS += -g $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT) -std=gnu++0x
static bool FLAGS_get_approx = false;
// The total number of levels
-static unsigned int FLAGS_num_levels = 7;
+static int FLAGS_num_levels = 7;
// Target level-0 file size for compaction
static int FLAGS_target_file_size_base = 2 * 1048576;
// large enough to serve all typical value sizes we want to write.
Random rnd(301);
std::string piece;
- while (data_.size() < std::max(1048576, FLAGS_value_size)) {
+ while (data_.size() < (unsigned)std::max(1048576, FLAGS_value_size)) {
// Add a short fragment that is as compressible as specified
// by FLAGS_compression_ratio.
test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece);
pos_ = 0;
}
- Slice Generate(int len) {
+ Slice Generate(unsigned int len) {
if (pos_ + len > data_.size()) {
pos_ = 0;
assert(len < data_.size());
for (int i = 0; i < FLAGS_min_level_to_compress; i++) {
options.compression_per_level[i] = kNoCompression;
}
- for (unsigned int i = FLAGS_min_level_to_compress;
+ for (int i = FLAGS_min_level_to_compress;
i < FLAGS_num_levels; i++) {
options.compression_per_level[i] = FLAGS_compression_type;
}
options_.db_log_dir.empty()) {
std::sort(old_log_files.begin(), old_log_files.end());
size_t end = old_log_file_count - options_.keep_log_file_num;
- for (int i = 0; i <= end; i++) {
+ for (unsigned int i = 0; i <= end; i++) {
std::string& to_delete = old_log_files.at(i);
// Log(options_.info_log, "Delete type=%d %s\n",
// int(kInfoLogFile), to_delete.c_str());
stall_leveln_slowdown_[max_level] += delayed;
// Make sure the following value doesn't round to zero.
rate_limit_delay_millis += std::max((delayed / 1000), (uint64_t) 1);
- if (rate_limit_delay_millis >= options_.rate_limit_delay_milliseconds) {
+ if (rate_limit_delay_millis >=
+ (unsigned)options_.rate_limit_delay_milliseconds) {
allow_rate_limit_delay = false;
}
// Log(options_.info_log,
};
static const int kMaxRecordType = kLastType;
-static const int kBlockSize = 32768;
+static const unsigned int kBlockSize = 32768;
// Header is checksum (4 bytes), type (1 byte), length (2 bytes).
static const int kHeaderSize = 4 + 1 + 2;
const Slice& user_begin,
const Slice& user_end,
std::vector<FileMetaData*>* inputs,
- int midIndex) {
+ unsigned int midIndex) {
const Comparator* user_cmp = vset_->icmp_.user_comparator();
#ifndef NDEBUG
#endif
}
- void CheckConsistencyForDeletes(VersionEdit* edit, int number, int level) {
+ void CheckConsistencyForDeletes(
+ VersionEdit* edit,
+ unsigned int number,
+ int level) {
#ifndef NDEBUG
// a file to be deleted better exist in the previous version
bool found = false;
for (int l = 0; !found && l < edit->number_levels_; l++) {
const std::vector<FileMetaData*>& base_files = base_->files_[l];
- for (int i = 0; i < base_files.size(); i++) {
+ for (unsigned int i = 0; i < base_files.size(); i++) {
FileMetaData* f = base_files[i];
if (f->number == number) {
found = true;
const Slice& begin, // nullptr means before all keys
const Slice& end, // nullptr means after all keys
std::vector<FileMetaData*>* inputs,
- int index); // start extending from this index
+ unsigned int index); // start extending from this index
// Returns true iff some file in the specified level overlaps
// some part of [*smallest_user_key,*largest_user_key].
double rate_limit;
// Max time a put will be stalled when rate_limit is enforced
- int rate_limit_delay_milliseconds;
+ unsigned int rate_limit_delay_milliseconds;
// manifest file is rolled over on reaching this limit.
// The older manifest file be deleted.
num_ += other.num_;
sum_ += other.sum_;
sum_squares_ += other.sum_squares_;
- for (int b = 0; b < bucketMapper.BucketCount(); b++) {
+ for (unsigned int b = 0; b < bucketMapper.BucketCount(); b++) {
buckets_[b] += other.buckets_[b];
}
}
double HistogramImpl::Percentile(double p) const {
double threshold = num_ * (p / 100.0);
double sum = 0;
- for (int b = 0; b < bucketMapper.BucketCount(); b++) {
+ for (unsigned int b = 0; b < bucketMapper.BucketCount(); b++) {
sum += buckets_[b];
if (sum >= threshold) {
// Scale linearly within this bucket
r.append("------------------------------------------------------\n");
const double mult = 100.0 / num_;
double sum = 0;
- for (int b = 0; b < bucketMapper.BucketCount(); b++) {
+ for (unsigned int b = 0; b < bucketMapper.BucketCount(); b++) {
if (buckets_[b] <= 0.0) continue;
sum += buckets_[b];
snprintf(buf, sizeof(buf),
Log(log," Options.block_size: %zd", block_size);
Log(log," Options.block_restart_interval: %d", block_restart_interval);
if (!compression_per_level.empty()) {
- for (int i = 0; i < compression_per_level.size(); i++) {
+ for (unsigned int i = 0; i < compression_per_level.size(); i++) {
Log(log," Options.compression[%d]: %d",
i, compression_per_level[i]);
}