tags
java/*.log
java/include/org_rocksdb_*.h
+unity.cc
echo $$t $$((etime - stime)) >> $(VALGRIND_DIR)/valgrind_tests_times; \
done
+unity.cc:
+ $(shell (export ROCKSDB_ROOT="$(CURDIR)"; "$(CURDIR)/build_tools/unity" "$(CURDIR)/unity.cc"))
+
+unity: unity.cc unity.o
+ $(CXX) unity.o $(EXEC_LDFLAGS) -o $@ $(LDFLAGS) $(COVERAGEFLAGS)
+
clean:
- -rm -f $(PROGRAMS) $(TESTS) $(LIBRARY) $(SHARED) $(MEMENVLIBRARY) build_config.mk
+ -rm -f $(PROGRAMS) $(TESTS) $(LIBRARY) $(SHARED) $(MEMENVLIBRARY) build_config.mk unity.cc
-rm -rf ios-x86/* ios-arm/*
-find . -name "*.[od]" -exec rm {} \;
-find . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" -exec rm {} \;
--- /dev/null
+#!/bin/sh
+#
+# Create the unity file
+#
+
+OUTPUT=$1
+if test -z "$OUTPUT"; then
+ echo "usage: $0 <output-filename>" >&2
+ exit 1
+fi
+
+# Delete existing file, if it exists
+rm -f "$OUTPUT"
+touch "$OUTPUT"
+
+# Detect OS
+if test -z "$TARGET_OS"; then
+ TARGET_OS=`uname -s`
+fi
+
+# generic port files (working on all platforms via #ifdef) go directly in /port
+GENERIC_PORT_FILES=`cd "$ROCKSDB_ROOT"; find port -name '*.cc' | tr "\n" " "`
+
+# Select platform-specific port files (if any) based on TARGET_OS
+case "$TARGET_OS" in
+ Darwin)
+ # PORT_FILES=port/darwin/darwin_specific.cc
+ ;;
+ IOS)
+ ;;
+ Linux)
+ # PORT_FILES=port/linux/linux_specific.cc
+ ;;
+ SunOS)
+ # PORT_FILES=port/sunos/sunos_specific.cc
+ ;;
+ FreeBSD)
+ # PORT_FILES=port/freebsd/freebsd_specific.cc
+ ;;
+ NetBSD)
+ # PORT_FILES=port/netbsd/netbsd_specific.cc
+ ;;
+ OpenBSD)
+ # PORT_FILES=port/openbsd/openbsd_specific.cc
+ ;;
+ DragonFly)
+ # PORT_FILES=port/dragonfly/dragonfly_specific.cc
+ ;;
+ OS_ANDROID_CROSSCOMPILE)
+ # PORT_FILES=port/android/android.cc
+ ;;
+ *)
+ echo "Unknown platform!" >&2
+ exit 1
+esac
+
+# We want to make a list of all cc files within util, db, table, and helpers
+# except for the test and benchmark files. By default, find will output a list
+# of all files matching either rule, so we need to append -print to make the
+# prune take effect.
+DIRS="util db table utilities"
+
+set -f # temporarily disable globbing so that our patterns aren't expanded
+PRUNE_TEST="-name *test*.cc -prune"
+PRUNE_BENCH="-name *bench*.cc -prune"
+PORTABLE_FILES=`cd "$ROCKSDB_ROOT"; find $DIRS $PRUNE_TEST -o $PRUNE_BENCH -o -name '*.cc' -print | sort`
+PORTABLE_CPP=`cd "$ROCKSDB_ROOT"; find $DIRS $PRUNE_TEST -o $PRUNE_BENCH -o -name '*.cpp' -print | sort`
+set +f # re-enable globbing
+
+# The sources consist of the portable files, plus the platform-specific port
+# file.
+for SOURCE_FILE in $PORTABLE_FILES $GENERIC_PORT_FILES $PORT_FILES $PORTABLE_CPP
+do
+ echo "#include <$SOURCE_FILE>" >> "$OUTPUT"
+done
+
+echo "int main(int argc, char** argv){ return 0; }" >> "$OUTPUT"
+
uint32_t ColumnFamilyHandleImpl::GetID() const { return cfd()->GetID(); }
-namespace {
-// Fix user-supplied options to be reasonable
-template <class T, class V>
-static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
- if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
- if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
-}
-} // anonymous namespace
-
ColumnFamilyOptions SanitizeOptions(const InternalKeyComparator* icmp,
const InternalFilterPolicy* ipolicy,
const ColumnFamilyOptions& src) {
namespace rocksdb {
-static uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
+uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
uint64_t sum = 0;
for (size_t i = 0; i < files.size() && files[i]; i++) {
sum += files[i]->fd.GetFileSize();
void ResetNextCompactionIndex();
};
+// Utility function: returns the sum of the raw on-disk sizes of |files|.
+extern uint64_t TotalFileSize(const std::vector<FileMetaData*>& files);
+
} // namespace rocksdb
namespace rocksdb {
+uint64_t TotalCompensatedFileSize(const std::vector<FileMetaData*>& files) {
+ uint64_t sum = 0;
+ for (size_t i = 0; i < files.size() && files[i]; i++) {
+ sum += files[i]->compensated_file_size;
+ }
+ return sum;
+}
+
namespace {
// Determine compression type, based on user options, level of the output
// file and whether compression is disabled.
}
}
-uint64_t TotalCompensatedFileSize(const std::vector<FileMetaData*>& files) {
- uint64_t sum = 0;
- for (size_t i = 0; i < files.size() && files[i]; i++) {
- sum += files[i]->compensated_file_size;
- }
- return sum;
-}
-
// Multiple two operands. If they overflow, return op1.
uint64_t MultiplyCheckOverflow(uint64_t op1, int op2) {
if (op1 == 0) {
}
};
+// Utility function: returns the sum of the compensated sizes of |files|.
+extern uint64_t TotalCompensatedFileSize(const std::vector<FileMetaData*>& files);
+
} // namespace rocksdb
}
};
-namespace {
-// Fix user-supplied options to be reasonable
-template <class T, class V>
-static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
- if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
- if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
-}
-} // anonymous namespace
-
Options SanitizeOptions(const std::string& dbname,
const InternalKeyComparator* icmp,
const InternalFilterPolicy* ipolicy,
const Options& src);
extern DBOptions SanitizeOptions(const std::string& db, const DBOptions& src);
+// Fix user-supplied options to be reasonable
+template <class T, class V>
+static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
+ if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
+ if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
+}
+
} // namespace rocksdb
};
} // anonymous namespace
-static uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
- uint64_t sum = 0;
- for (size_t i = 0; i < files.size() && files[i]; i++) {
- sum += files[i]->fd.GetFileSize();
- }
- return sum;
-}
-
-static uint64_t TotalCompensatedFileSize(
- const std::vector<FileMetaData*>& files) {
- uint64_t sum = 0;
- for (size_t i = 0; i < files.size() && files[i]; i++) {
- sum += files[i]->compensated_file_size;
- }
- return sum;
-}
-
Version::~Version() {
assert(refs_ == 0);
}
// Callback from TableCache::Get()
-namespace {
enum SaverState {
kNotFound,
kFound,
kCorrupt,
kMerge // saver contains the current merge result (the operands)
};
+
+namespace version_set {
struct Saver {
SaverState state;
const Comparator* ucmp;
Logger* logger;
Statistics* statistics;
};
-}
+} // namespace version_set
// Called from TableCache::Get and Table::Get when file/block in which
// key may exist are not there in TableCache/BlockCache respectively. In this
// IO to be certain.Set the status=kFound and value_found=false to let the
// caller know that key may exist but is not there in memory
static void MarkKeyMayExist(void* arg) {
- Saver* s = reinterpret_cast<Saver*>(arg);
+ version_set::Saver* s = reinterpret_cast<version_set::Saver*>(arg);
s->state = kFound;
if (s->value_found != nullptr) {
*(s->value_found) = false;
static bool SaveValue(void* arg, const ParsedInternalKey& parsed_key,
const Slice& v) {
- Saver* s = reinterpret_cast<Saver*>(arg);
+ version_set::Saver* s = reinterpret_cast<version_set::Saver*>(arg);
MergeContext* merge_contex = s->merge_context;
std::string merge_result; // temporary area for merge results later
Slice user_key = k.user_key();
assert(status->ok() || status->IsMergeInProgress());
- Saver saver;
+ version_set::Saver saver;
saver.state = status->ok()? kNotFound : kMerge;
saver.ucmp = user_comparator_;
saver.user_key = user_key;
extern const std::string kHashIndexPrefixesBlock;
extern const std::string kHashIndexPrefixesMetadataBlock;
-namespace {
typedef BlockBasedTableOptions::IndexType IndexType;
return raw;
}
-} // anonymous namespace
-
// kBlockBasedTableMagicNumber was picked by running
// echo rocksdb.table.block_based | sha1sum
// and taking the leading 64 bits.
namespace rocksdb {
-namespace {
-
inline uint32_t Hash(const Slice& s) {
return rocksdb::Hash(s.data(), s.size(), 0);
}
return Hash(prefix) % num_buckets;
}
-
-
// The prefix block index is simply a bucket array, with each entry pointing to
// the blocks that span the prefixes hashed to this bucket.
//
return index | kBlockArrayMask;
}
-
// temporary storage for prefix information during index building
struct PrefixRecord {
Slice prefix;
PrefixRecord* next;
};
-} // anonymous namespace
-
class BlockPrefixIndex::Builder {
public:
explicit Builder(const SliceTransform* internal_prefix_extractor)
#include "util/autovector.h"
namespace rocksdb {
-namespace {
+namespace merger {
typedef std::priority_queue<
IteratorWrapper*,
std::vector<IteratorWrapper*>,
MinIterHeap NewMinIterHeap(const Comparator* comparator) {
return MinIterHeap(MinIteratorComparator(comparator));
}
-} // namespace
+} // namespace merger
const size_t kNumIterReserve = 4;
current_(nullptr),
use_heap_(true),
direction_(kForward),
- maxHeap_(NewMaxIterHeap(comparator_)),
- minHeap_(NewMinIterHeap(comparator_)) {
+ maxHeap_(merger::NewMaxIterHeap(comparator_)),
+ minHeap_(merger::NewMinIterHeap(comparator_)) {
children_.resize(n);
for (int i = 0; i < n; i++) {
children_[i].Set(children[i]);
kReverse
};
Direction direction_;
- MaxIterHeap maxHeap_;
- MinIterHeap minHeap_;
+ merger::MaxIterHeap maxHeap_;
+ merger::MinIterHeap minHeap_;
};
void MergingIterator::FindSmallest() {
void MergingIterator::ClearHeaps() {
use_heap_ = true;
- maxHeap_ = NewMaxIterHeap(comparator_);
- minHeap_ = NewMinIterHeap(comparator_);
+ maxHeap_ = merger::NewMaxIterHeap(comparator_);
+ minHeap_ = merger::NewMinIterHeap(comparator_);
}
Iterator* NewMergingIterator(const Comparator* cmp, Iterator** list, int n,
namespace rocksdb {
namespace {
-static uint32_t BloomHash(const Slice& key) {
- return Hash(key.data(), key.size(), 0xbc9f1d34);
-}
class BloomFilterPolicy : public FilterPolicy {
private:
namespace rocksdb {
namespace {
-static uint32_t BloomHash(const Slice& key) {
- return Hash(key.data(), key.size(), 0xbc9f1d34);
-}
uint32_t GetTotalBitsForLocality(uint32_t total_bits) {
uint32_t num_blocks =
extern uint32_t Hash(const char* data, size_t n, uint32_t seed);
+inline uint32_t BloomHash(const Slice& key) {
+ return Hash(key.data(), key.size(), 0xbc9f1d34);
+}
+
inline uint32_t GetSliceHash(const Slice& s) {
return Hash(s.data(), s.size(), 397);
}
// Otherwise, calculate a score based on threshold and expected value of
// two styles, weighing reads 4X important than writes.
int expected_levels = static_cast<int>(ceil(
- log(target_db_size / write_buffer_size) / log(kBytesForLevelMultiplier)));
+ ::log(target_db_size / write_buffer_size) / ::log(kBytesForLevelMultiplier)));
int expected_max_files_universal =
static_cast<int>(ceil(log2(target_db_size / write_buffer_size)));
int write_amplification_threshold,
uint64_t target_db_size, Options* options) {
int expected_levels_one_level0_file =
- static_cast<int>(ceil(log(target_db_size / options->write_buffer_size) /
- log(kBytesForLevelMultiplier)));
+ static_cast<int>(ceil(::log(target_db_size / options->write_buffer_size) /
+ ::log(kBytesForLevelMultiplier)));
int level0_stop_writes_trigger =
read_amplification_threshold - expected_levels_one_level0_file;
// how many level of details to look for
int numberOfTilesAtMaxDepth = floor((bottomRight.x - topLeft.x) / 256);
- int zoomLevelsToRise = floor(log(numberOfTilesAtMaxDepth) / log(2));
+ int zoomLevelsToRise = floor(::log(numberOfTilesAtMaxDepth) / ::log(2));
zoomLevelsToRise++;
int levels = std::max(0, Detail - zoomLevelsToRise);
double latitude = clip(pos.latitude, MinLatitude, MaxLatitude);
double x = (pos.longitude + 180) / 360;
double sinLatitude = sin(latitude * PI / 180);
- double y = 0.5 - log((1 + sinLatitude) / (1 - sinLatitude)) / (4 * PI);
+ double y = 0.5 - ::log((1 + sinLatitude) / (1 - sinLatitude)) / (4 * PI);
double mapSize = MapSize(levelOfDetail);
double X = floor(clip(x * mapSize + 0.5, 0, mapSize - 1));
double Y = floor(clip(y * mapSize + 0.5, 0, mapSize - 1));