fi
tar xvzf snappy-$(SNAPPY_VER).tar.gz
mkdir snappy-$(SNAPPY_VER)/build
- cd snappy-$(SNAPPY_VER)/build && CFLAGS='${EXTRA_CFLAGS}' CXXFLAGS='${EXTRA_CXXFLAGS}' LDFLAGS='${EXTRA_LDFLAGS}' cmake .. && $(MAKE) ${SNAPPY_MAKE_TARGET}
+ cd snappy-$(SNAPPY_VER)/build && CFLAGS='${EXTRA_CFLAGS}' CXXFLAGS='${EXTRA_CXXFLAGS}' LDFLAGS='${EXTRA_LDFLAGS}' cmake -DCMAKE_POSITION_INDEPENDENT_CODE=ON .. && $(MAKE) ${SNAPPY_MAKE_TARGET}
cp snappy-$(SNAPPY_VER)/build/libsnappy.a .
liblz4.a:
// a limit, a flush will be triggered in the next DB to which the next write
// is issued.
//
- // If the object is only passed to on DB, the behavior is the same as
+ // If the object is only passed to one DB, the behavior is the same as
// db_write_buffer_size. When write_buffer_manager is set, the value set will
// override db_write_buffer_size.
//
// Dynamically changeable through SetDBOptions() API.
uint64_t wal_bytes_per_sync = 0;
- // A vector of EventListeners which callback functions will be called
+ // A vector of EventListeners whose callback functions will be called
// when specific RocksDB event happens.
std::vector<std::shared_ptr<EventListener>> listeners;
* 1. Any ticker should be added before TICKER_ENUM_MAX.
* 2. Add a readable string in TickersNameMap below for the newly added ticker.
* 3. Add a corresponding enum value to TickerType.java in the java API
+ * 4. Add the enum conversions from Java and C++ to portal.h's toJavaTickerType and toCppTickers
*/
enum Tickers : uint32_t {
// total block cache misses
// Default: 0 (disabled)
uint32_t read_amp_bytes_per_bit = 0;
- // We currently have three versions:
+ // We currently have five versions:
// 0 -- This version is currently written out by all RocksDB's versions by
// default. Can be read by really old RocksDB's. Doesn't support changing
// checksum (default is CRC32).
rocksjni/compaction_filter.cc
rocksjni/compaction_filter_factory.cc
rocksjni/compaction_filter_factory_jnicallback.cc
+ rocksjni/compaction_job_info.cc
+ rocksjni/compaction_job_stats.cc
+ rocksjni/compaction_options.cc
rocksjni/compaction_options_fifo.cc
rocksjni/compaction_options_universal.cc
rocksjni/compact_range_options.cc
rocksjni/optimistic_transaction_options.cc
rocksjni/options.cc
rocksjni/options_util.cc
+ rocksjni/persistent_cache.cc
rocksjni/ratelimiterjni.cc
rocksjni/remove_emptyvalue_compactionfilterjni.cc
rocksjni/restorejni.cc
rocksjni/statistics.cc
rocksjni/statisticsjni.cc
rocksjni/table.cc
+ rocksjni/table_filter.cc
+ rocksjni/table_filter_jnicallback.cc
+ rocksjni/thread_status.cc
+ rocksjni/trace_writer.cc
+ rocksjni/trace_writer_jnicallback.cc
rocksjni/transaction.cc
rocksjni/transaction_db.cc
rocksjni/transaction_db_options.cc
rocksjni/transaction_notifier_jnicallback.cc
rocksjni/transaction_options.cc
rocksjni/ttl.cc
+ rocksjni/wal_filter.cc
+ rocksjni/wal_filter_jnicallback.cc
rocksjni/write_batch.cc
rocksjni/writebatchhandlerjnicallback.cc
rocksjni/write_batch_test.cc
org.rocksdb.AbstractNativeReference
org.rocksdb.AbstractRocksIterator
org.rocksdb.AbstractSlice
+ org.rocksdb.AbstractTableFilter
+ org.rocksdb.AbstractTraceWriter
org.rocksdb.AbstractTransactionNotifier
+ org.rocksdb.AbstractWalFilter
org.rocksdb.BackupableDBOptions
org.rocksdb.BackupEngine
org.rocksdb.BlockBasedTableConfig
org.rocksdb.ClockCache
org.rocksdb.ColumnFamilyHandle
org.rocksdb.ColumnFamilyOptions
+ org.rocksdb.CompactionJobInfo
+ org.rocksdb.CompactionJobStats
+ org.rocksdb.CompactionOptions
org.rocksdb.CompactionOptionsFIFO
org.rocksdb.CompactionOptionsUniversal
org.rocksdb.CompactRangeOptions
org.rocksdb.FlushOptions
org.rocksdb.HashLinkedListMemTableConfig
org.rocksdb.HashSkipListMemTableConfig
+ org.rocksdb.HdfsEnv
org.rocksdb.IngestExternalFileOptions
org.rocksdb.Logger
org.rocksdb.LRUCache
org.rocksdb.OptimisticTransactionOptions
org.rocksdb.Options
org.rocksdb.OptionsUtil
+ org.rocksdb.PersistentCache
org.rocksdb.PlainTableConfig
org.rocksdb.RateLimiter
org.rocksdb.ReadOptions
org.rocksdb.Statistics
org.rocksdb.StringAppendOperator
org.rocksdb.TableFormatConfig
+ org.rocksdb.ThreadStatus
+ org.rocksdb.TimedEnv
org.rocksdb.Transaction
org.rocksdb.TransactionDB
org.rocksdb.TransactionDBOptions
src/main/java/org/rocksdb/AbstractCompactionFilter.java
src/main/java/org/rocksdb/AbstractComparator.java
src/main/java/org/rocksdb/AbstractImmutableNativeReference.java
+ src/main/java/org/rocksdb/AbstractMutableOptions.java
src/main/java/org/rocksdb/AbstractNativeReference.java
src/main/java/org/rocksdb/AbstractRocksIterator.java
src/main/java/org/rocksdb/AbstractSlice.java
+ src/main/java/org/rocksdb/AbstractTableFilter.java
+ src/main/java/org/rocksdb/AbstractTraceWriter.java
src/main/java/org/rocksdb/AbstractTransactionNotifier.java
+ src/main/java/org/rocksdb/AbstractWalFilter.java
src/main/java/org/rocksdb/AbstractWriteBatch.java
src/main/java/org/rocksdb/AccessHint.java
src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
src/main/java/org/rocksdb/ClockCache.java
src/main/java/org/rocksdb/ColumnFamilyDescriptor.java
src/main/java/org/rocksdb/ColumnFamilyHandle.java
+ src/main/java/org/rocksdb/ColumnFamilyMetaData.java
src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
src/main/java/org/rocksdb/ColumnFamilyOptions.java
+ src/main/java/org/rocksdb/CompactionJobInfo.java
+ src/main/java/org/rocksdb/CompactionJobStats.java
+ src/main/java/org/rocksdb/CompactionOptions.java
src/main/java/org/rocksdb/CompactionOptionsFIFO.java
src/main/java/org/rocksdb/CompactionOptionsUniversal.java
src/main/java/org/rocksdb/CompactionPriority.java
+ src/main/java/org/rocksdb/CompactionReason.java
src/main/java/org/rocksdb/CompactRangeOptions.java
src/main/java/org/rocksdb/CompactionStopStyle.java
src/main/java/org/rocksdb/CompactionStyle.java
src/main/java/org/rocksdb/ComparatorType.java
src/main/java/org/rocksdb/CompressionOptions.java
src/main/java/org/rocksdb/CompressionType.java
+ src/main/java/org/rocksdb/DataBlockIndexType.java
src/main/java/org/rocksdb/DBOptionsInterface.java
src/main/java/org/rocksdb/DBOptions.java
src/main/java/org/rocksdb/DbPath.java
src/main/java/org/rocksdb/FlushOptions.java
src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java
src/main/java/org/rocksdb/HashSkipListMemTableConfig.java
+ src/main/java/org/rocksdb/HdfsEnv.java
src/main/java/org/rocksdb/HistogramData.java
src/main/java/org/rocksdb/HistogramType.java
src/main/java/org/rocksdb/IndexType.java
src/main/java/org/rocksdb/InfoLogLevel.java
src/main/java/org/rocksdb/IngestExternalFileOptions.java
+ src/main/java/org/rocksdb/LevelMetaData.java
+ src/main/java/org/rocksdb/LiveFileMetaData.java
+ src/main/java/org/rocksdb/LogFile.java
src/main/java/org/rocksdb/Logger.java
src/main/java/org/rocksdb/LRUCache.java
src/main/java/org/rocksdb/MemoryUsageType.java
src/main/java/org/rocksdb/MemoryUtil.java
src/main/java/org/rocksdb/MemTableConfig.java
src/main/java/org/rocksdb/MergeOperator.java
- src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java
src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
+ src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java
+ src/main/java/org/rocksdb/MutableDBOptions.java
+ src/main/java/org/rocksdb/MutableDBOptionsInterface.java
+ src/main/java/org/rocksdb/MutableOptionKey.java
+ src/main/java/org/rocksdb/MutableOptionValue.java
src/main/java/org/rocksdb/NativeComparatorWrapper.java
src/main/java/org/rocksdb/NativeLibraryLoader.java
+ src/main/java/org/rocksdb/OperationStage.java
+ src/main/java/org/rocksdb/OperationType.java
src/main/java/org/rocksdb/OptimisticTransactionDB.java
src/main/java/org/rocksdb/OptimisticTransactionOptions.java
src/main/java/org/rocksdb/Options.java
src/main/java/org/rocksdb/OptionsUtil.java
+ src/main/java/org/rocksdb/PersistentCache.java
src/main/java/org/rocksdb/PlainTableConfig.java
+ src/main/java/org/rocksdb/Priority.java
+ src/main/java/org/rocksdb/Range.java
src/main/java/org/rocksdb/RateLimiter.java
src/main/java/org/rocksdb/RateLimiterMode.java
src/main/java/org/rocksdb/ReadOptions.java
src/main/java/org/rocksdb/RocksMemEnv.java
src/main/java/org/rocksdb/RocksMutableObject.java
src/main/java/org/rocksdb/RocksObject.java
+ src/main/java/org/rocksdb/SizeApproximationFlag.java
src/main/java/org/rocksdb/SkipListMemTableConfig.java
src/main/java/org/rocksdb/Slice.java
src/main/java/org/rocksdb/Snapshot.java
src/main/java/org/rocksdb/SstFileManager.java
+ src/main/java/org/rocksdb/SstFileMetaData.java
src/main/java/org/rocksdb/SstFileWriter.java
+ src/main/java/org/rocksdb/StateType.java
src/main/java/org/rocksdb/StatisticsCollectorCallback.java
src/main/java/org/rocksdb/StatisticsCollector.java
src/main/java/org/rocksdb/Statistics.java
src/main/java/org/rocksdb/StatsLevel.java
src/main/java/org/rocksdb/Status.java
src/main/java/org/rocksdb/StringAppendOperator.java
+ src/main/java/org/rocksdb/TableFilter.java
+ src/main/java/org/rocksdb/TableProperties.java
src/main/java/org/rocksdb/TableFormatConfig.java
+ src/main/java/org/rocksdb/ThreadType.java
+ src/main/java/org/rocksdb/ThreadStatus.java
src/main/java/org/rocksdb/TickerType.java
+ src/main/java/org/rocksdb/TimedEnv.java
+ src/main/java/org/rocksdb/TraceOptions.java
+ src/main/java/org/rocksdb/TraceWriter.java
src/main/java/org/rocksdb/TransactionalDB.java
src/main/java/org/rocksdb/TransactionalOptions.java
src/main/java/org/rocksdb/TransactionDB.java
src/main/java/org/rocksdb/TtlDB.java
src/main/java/org/rocksdb/TxnDBWritePolicy.java
src/main/java/org/rocksdb/VectorMemTableConfig.java
+ src/main/java/org/rocksdb/WalFileType.java
+ src/main/java/org/rocksdb/WalFilter.java
+ src/main/java/org/rocksdb/WalProcessingOption.java
src/main/java/org/rocksdb/WALRecoveryMode.java
src/main/java/org/rocksdb/WBWIRocksIterator.java
src/main/java/org/rocksdb/WriteBatchInterface.java
NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\
org.rocksdb.AbstractCompactionFilterFactory\
org.rocksdb.AbstractSlice\
+ org.rocksdb.AbstractTableFilter\
+ org.rocksdb.AbstractTraceWriter\
org.rocksdb.AbstractTransactionNotifier\
+ org.rocksdb.AbstractWalFilter\
org.rocksdb.BackupEngine\
org.rocksdb.BackupableDBOptions\
org.rocksdb.BlockBasedTableConfig\
org.rocksdb.CassandraValueMergeOperator\
org.rocksdb.ColumnFamilyHandle\
org.rocksdb.ColumnFamilyOptions\
+ org.rocksdb.CompactionJobInfo\
+ org.rocksdb.CompactionJobStats\
+ org.rocksdb.CompactionOptions\
org.rocksdb.CompactionOptionsFIFO\
org.rocksdb.CompactionOptionsUniversal\
org.rocksdb.CompactRangeOptions\
org.rocksdb.IngestExternalFileOptions\
org.rocksdb.HashLinkedListMemTableConfig\
org.rocksdb.HashSkipListMemTableConfig\
+ org.rocksdb.HdfsEnv\
org.rocksdb.Logger\
org.rocksdb.LRUCache\
org.rocksdb.MemoryUsageType\
org.rocksdb.OptimisticTransactionOptions\
org.rocksdb.Options\
org.rocksdb.OptionsUtil\
+ org.rocksdb.PersistentCache\
org.rocksdb.PlainTableConfig\
org.rocksdb.RateLimiter\
org.rocksdb.ReadOptions\
org.rocksdb.SstFileManager\
org.rocksdb.SstFileWriter\
org.rocksdb.Statistics\
+ org.rocksdb.ThreadStatus\
+ org.rocksdb.TimedEnv\
org.rocksdb.Transaction\
org.rocksdb.TransactionDB\
org.rocksdb.TransactionDBOptions\
org.rocksdb.ClockCacheTest\
org.rocksdb.ColumnFamilyOptionsTest\
org.rocksdb.ColumnFamilyTest\
- org.rocksdb.CompactionFilterFactoryTest\
+ org.rocksdb.CompactionFilterFactoryTest\
+ org.rocksdb.CompactionJobInfoTest\
+ org.rocksdb.CompactionJobStatsTest\
+ org.rocksdb.CompactionOptionsTest\
org.rocksdb.CompactionOptionsFIFOTest\
org.rocksdb.CompactionOptionsUniversalTest\
org.rocksdb.CompactionPriorityTest\
org.rocksdb.DirectComparatorTest\
org.rocksdb.DirectSliceTest\
org.rocksdb.EnvOptionsTest\
+ org.rocksdb.HdfsEnvTest\
org.rocksdb.IngestExternalFileOptionsTest\
org.rocksdb.util.EnvironmentTest\
org.rocksdb.FilterTest\
org.rocksdb.MergeTest\
org.rocksdb.MixedOptionsTest\
org.rocksdb.MutableColumnFamilyOptionsTest\
+ org.rocksdb.MutableDBOptionsTest\
org.rocksdb.NativeComparatorWrapperTest\
org.rocksdb.NativeLibraryLoaderTest\
org.rocksdb.OptimisticTransactionTest\
org.rocksdb.ReadOptionsTest\
org.rocksdb.RocksDBTest\
org.rocksdb.RocksDBExceptionTest\
- org.rocksdb.RocksEnvTest\
+ org.rocksdb.DefaultEnvTest\
org.rocksdb.RocksIteratorTest\
org.rocksdb.RocksMemEnvTest\
org.rocksdb.util.SizeUnitTest\
org.rocksdb.SnapshotTest\
org.rocksdb.SstFileManagerTest\
org.rocksdb.SstFileWriterTest\
+ org.rocksdb.TableFilterTest\
+ org.rocksdb.TimedEnvTest\
org.rocksdb.TransactionTest\
org.rocksdb.TransactionDBTest\
org.rocksdb.TransactionOptionsTest\
org.rocksdb.TtlDBTest\
org.rocksdb.StatisticsTest\
org.rocksdb.StatisticsCollectorTest\
+ org.rocksdb.WalFilterTest\
org.rocksdb.WALRecoveryModeTest\
org.rocksdb.WriteBatchHandlerTest\
org.rocksdb.WriteBatchTest\
options.setCreateIfMissing(false);
}
if (useMemenv_) {
- options.setEnv(new RocksMemEnv());
+ options.setEnv(new RocksMemEnv(Env.getDefault()));
}
switch (memtable_) {
case "skip_list":
* Signature: (J)V
*/
void Java_org_rocksdb_AbstractCompactionFilterFactory_disposeInternal(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
auto* ptr_sptr_cff = reinterpret_cast<
std::shared_ptr<rocksdb::CompactionFilterFactoryJniCallback>*>(jhandle);
delete ptr_sptr_cff;
- // @lint-ignore TXT4 T25377293 Grandfathered in
}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// rocksdb::CompactionJobInfo.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_CompactionJobInfo.h"
+#include "rocksdb/listener.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: newCompactionJobInfo
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_CompactionJobInfo_newCompactionJobInfo(
+ JNIEnv*, jclass) {
+ auto* compact_job_info = new rocksdb::CompactionJobInfo();
+ return reinterpret_cast<jlong>(compact_job_info);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompactionJobInfo_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+ delete compact_job_info;
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: columnFamilyName
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_CompactionJobInfo_columnFamilyName(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+ return rocksdb::JniUtil::copyBytes(
+ env, compact_job_info->cf_name);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: status
+ * Signature: (J)Lorg/rocksdb/Status;
+ */
+jobject Java_org_rocksdb_CompactionJobInfo_status(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+ return rocksdb::StatusJni::construct(
+ env, compact_job_info->status);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: threadId
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobInfo_threadId(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+ return static_cast<jlong>(compact_job_info->thread_id);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: jobId
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionJobInfo_jobId(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+ return static_cast<jint>(compact_job_info->job_id);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: baseInputLevel
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionJobInfo_baseInputLevel(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+ return static_cast<jint>(compact_job_info->base_input_level);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: outputLevel
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionJobInfo_outputLevel(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+ return static_cast<jint>(compact_job_info->output_level);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: inputFiles
+ * Signature: (J)[Ljava/lang/String;
+ */
+jobjectArray Java_org_rocksdb_CompactionJobInfo_inputFiles(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+ return rocksdb::JniUtil::toJavaStrings(
+ env, &compact_job_info->input_files);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: outputFiles
+ * Signature: (J)[Ljava/lang/String;
+ */
+jobjectArray Java_org_rocksdb_CompactionJobInfo_outputFiles(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+ return rocksdb::JniUtil::toJavaStrings(
+ env, &compact_job_info->output_files);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: tableProperties
+ * Signature: (J)Ljava/util/Map;
+ */
+jobject Java_org_rocksdb_CompactionJobInfo_tableProperties(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+ auto* map = &compact_job_info->table_properties;
+
+ jobject jhash_map = rocksdb::HashMapJni::construct(
+ env, static_cast<uint32_t>(map->size()));
+ if (jhash_map == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ const rocksdb::HashMapJni::FnMapKV<const std::string, std::shared_ptr<const rocksdb::TableProperties>, jobject, jobject> fn_map_kv =
+ [env](const std::pair<const std::string, std::shared_ptr<const rocksdb::TableProperties>>& kv) {
+ jstring jkey = rocksdb::JniUtil::toJavaString(env, &(kv.first), false);
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ jobject jtable_properties = rocksdb::TablePropertiesJni::fromCppTableProperties(
+ env, *(kv.second.get()));
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ env->DeleteLocalRef(jkey);
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ return std::unique_ptr<std::pair<jobject, jobject>>(
+ new std::pair<jobject, jobject>(static_cast<jobject>(jkey), jtable_properties));
+ };
+
+ if (!rocksdb::HashMapJni::putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
+ // exception occurred
+ return nullptr;
+ }
+
+ return jhash_map;
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: compactionReason
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_CompactionJobInfo_compactionReason(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+ return rocksdb::CompactionReasonJni::toJavaCompactionReason(
+ compact_job_info->compaction_reason);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: compression
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_CompactionJobInfo_compression(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+ return rocksdb::CompressionTypeJni::toJavaCompressionType(
+ compact_job_info->compression);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobInfo
+ * Method: stats
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobInfo_stats(
+ JNIEnv *, jclass, jlong jhandle) {
+ auto* compact_job_info =
+ reinterpret_cast<rocksdb::CompactionJobInfo*>(jhandle);
+ auto* stats = new rocksdb::CompactionJobStats();
+ stats->Add(compact_job_info->stats);
+ return reinterpret_cast<jlong>(stats);
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// rocksdb::CompactionJobStats.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_CompactionJobStats.h"
+#include "rocksdb/compaction_job_stats.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: newCompactionJobStats
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_newCompactionJobStats(
+ JNIEnv*, jclass) {
+ auto* compact_job_stats = new rocksdb::CompactionJobStats();
+ return reinterpret_cast<jlong>(compact_job_stats);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompactionJobStats_disposeInternal(
+ JNIEnv *, jobject, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ delete compact_job_stats;
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: reset
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompactionJobStats_reset(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ compact_job_stats->Reset();
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: add
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_CompactionJobStats_add(
+ JNIEnv*, jclass, jlong jhandle, jlong jother_handle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ auto* other_compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jother_handle);
+ compact_job_stats->Add(*other_compact_job_stats);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: elapsedMicros
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_elapsedMicros(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(compact_job_stats->elapsed_micros);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numInputRecords
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numInputRecords(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(compact_job_stats->num_input_records);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numInputFiles
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numInputFiles(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(compact_job_stats->num_input_files);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numInputFilesAtOutputLevel
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numInputFilesAtOutputLevel(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->num_input_files_at_output_level);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numOutputRecords
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numOutputRecords(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->num_output_records);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numOutputFiles
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numOutputFiles(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->num_output_files);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: isManualCompaction
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_CompactionJobStats_isManualCompaction(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ if (compact_job_stats->is_manual_compaction) {
+ return JNI_TRUE;
+ } else {
+ return JNI_FALSE;
+ }
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: totalInputBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_totalInputBytes(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->total_input_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: totalOutputBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_totalOutputBytes(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->total_output_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numRecordsReplaced
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numRecordsReplaced(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->num_records_replaced);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: totalInputRawKeyBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_totalInputRawKeyBytes(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->total_input_raw_key_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: totalInputRawValueBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_totalInputRawValueBytes(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->total_input_raw_value_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numInputDeletionRecords
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numInputDeletionRecords(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->num_input_deletion_records);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numExpiredDeletionRecords
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numExpiredDeletionRecords(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->num_expired_deletion_records);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numCorruptKeys
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numCorruptKeys(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->num_corrupt_keys);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: fileWriteNanos
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_fileWriteNanos(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->file_write_nanos);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: fileRangeSyncNanos
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_fileRangeSyncNanos(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->file_range_sync_nanos);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: fileFsyncNanos
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_fileFsyncNanos(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->file_fsync_nanos);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: filePrepareWriteNanos
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_filePrepareWriteNanos(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->file_prepare_write_nanos);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: smallestOutputKeyPrefix
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_CompactionJobStats_smallestOutputKeyPrefix(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return rocksdb::JniUtil::copyBytes(env,
+ compact_job_stats->smallest_output_key_prefix);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: largestOutputKeyPrefix
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_CompactionJobStats_largestOutputKeyPrefix(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return rocksdb::JniUtil::copyBytes(env,
+ compact_job_stats->largest_output_key_prefix);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numSingleDelFallthru
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numSingleDelFallthru(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->num_single_del_fallthru);
+}
+
+/*
+ * Class: org_rocksdb_CompactionJobStats
+ * Method: numSingleDelMismatch
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobStats_numSingleDelMismatch(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_job_stats =
+ reinterpret_cast<rocksdb::CompactionJobStats*>(jhandle);
+ return static_cast<jlong>(
+ compact_job_stats->num_single_del_mismatch);
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// rocksdb::CompactionOptions.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_CompactionOptions.h"
+#include "rocksdb/options.h"
+#include "rocksjni/portal.h"
+
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: newCompactionOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_CompactionOptions_newCompactionOptions(
+ JNIEnv*, jclass) {
+ auto* compact_opts = new rocksdb::CompactionOptions();
+ return reinterpret_cast<jlong>(compact_opts);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompactionOptions_disposeInternal(
+ JNIEnv *, jobject, jlong jhandle) {
+ auto* compact_opts =
+ reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
+ delete compact_opts;
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: compression
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_CompactionOptions_compression(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_opts =
+ reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
+ return rocksdb::CompressionTypeJni::toJavaCompressionType(
+ compact_opts->compression);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: setCompression
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_CompactionOptions_setCompression(
+ JNIEnv*, jclass, jlong jhandle, jbyte jcompression_type_value) {
+ auto* compact_opts =
+ reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
+ compact_opts->compression =
+ rocksdb::CompressionTypeJni::toCppCompressionType(
+ jcompression_type_value);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: outputFileSizeLimit
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionOptions_outputFileSizeLimit(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_opts =
+ reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
+ return static_cast<jlong>(
+ compact_opts->output_file_size_limit);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: setOutputFileSizeLimit
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_CompactionOptions_setOutputFileSizeLimit(
+ JNIEnv*, jclass, jlong jhandle, jlong joutput_file_size_limit) {
+ auto* compact_opts =
+ reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
+ compact_opts->output_file_size_limit =
+ static_cast<uint64_t>(joutput_file_size_limit);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: maxSubcompactions
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionOptions_maxSubcompactions(
+ JNIEnv*, jclass, jlong jhandle) {
+ auto* compact_opts =
+ reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
+ return static_cast<jint>(
+ compact_opts->max_subcompactions);
+}
+
+/*
+ * Class: org_rocksdb_CompactionOptions
+ * Method: setMaxSubcompactions
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_CompactionOptions_setMaxSubcompactions(
+ JNIEnv*, jclass, jlong jhandle, jint jmax_subcompactions) {
+ auto* compact_opts =
+ reinterpret_cast<rocksdb::CompactionOptions*>(jhandle);
+ compact_opts->max_subcompactions =
+ static_cast<uint32_t>(jmax_subcompactions);
+}
* Signature: ()J
*/
jlong Java_org_rocksdb_CompactionOptionsFIFO_newCompactionOptionsFIFO(
- JNIEnv* /*env*/, jclass /*jcls*/) {
+ JNIEnv*, jclass) {
const auto* opt = new rocksdb::CompactionOptionsFIFO();
return reinterpret_cast<jlong>(opt);
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_CompactionOptionsFIFO_setMaxTableFilesSize(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jmax_table_files_size) {
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_table_files_size) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
opt->max_table_files_size = static_cast<uint64_t>(jmax_table_files_size);
}
* Method: maxTableFilesSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_CompactionOptionsFIFO_maxTableFilesSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_CompactionOptionsFIFO_maxTableFilesSize(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
return static_cast<jlong>(opt->max_table_files_size);
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_CompactionOptionsFIFO_setAllowCompaction(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean allow_compaction) {
+ JNIEnv*, jobject, jlong jhandle, jboolean allow_compaction) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
opt->allow_compaction = static_cast<bool>(allow_compaction);
}
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_CompactionOptionsFIFO_allowCompaction(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
return static_cast<jboolean>(opt->allow_compaction);
}
* Method: disposeInternal
* Signature: (J)V
*/
-void Java_org_rocksdb_CompactionOptionsFIFO_disposeInternal(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+void Java_org_rocksdb_CompactionOptionsFIFO_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
delete reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(jhandle);
}
* Signature: ()J
*/
jlong Java_org_rocksdb_CompactionOptionsUniversal_newCompactionOptionsUniversal(
- JNIEnv* /*env*/, jclass /*jcls*/) {
+ JNIEnv*, jclass) {
const auto* opt = new rocksdb::CompactionOptionsUniversal();
return reinterpret_cast<jlong>(opt);
}
* Signature: (JI)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setSizeRatio(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint jsize_ratio) {
+ JNIEnv*, jobject, jlong jhandle, jint jsize_ratio) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->size_ratio = static_cast<unsigned int>(jsize_ratio);
}
* Method: sizeRatio
* Signature: (J)I
*/
-jint Java_org_rocksdb_CompactionOptionsUniversal_sizeRatio(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_CompactionOptionsUniversal_sizeRatio(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return static_cast<jint>(opt->size_ratio);
}
* Signature: (JI)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setMinMergeWidth(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint jmin_merge_width) {
+ JNIEnv*, jobject, jlong jhandle, jint jmin_merge_width) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->min_merge_width = static_cast<unsigned int>(jmin_merge_width);
}
* Method: minMergeWidth
* Signature: (J)I
*/
-jint Java_org_rocksdb_CompactionOptionsUniversal_minMergeWidth(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_CompactionOptionsUniversal_minMergeWidth(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return static_cast<jint>(opt->min_merge_width);
}
* Signature: (JI)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setMaxMergeWidth(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint jmax_merge_width) {
+ JNIEnv*, jobject, jlong jhandle, jint jmax_merge_width) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->max_merge_width = static_cast<unsigned int>(jmax_merge_width);
}
* Method: maxMergeWidth
* Signature: (J)I
*/
-jint Java_org_rocksdb_CompactionOptionsUniversal_maxMergeWidth(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_CompactionOptionsUniversal_maxMergeWidth(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return static_cast<jint>(opt->max_merge_width);
}
* Signature: (JI)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setMaxSizeAmplificationPercent(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint jmax_size_amplification_percent) {
+ JNIEnv*, jobject, jlong jhandle, jint jmax_size_amplification_percent) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->max_size_amplification_percent =
static_cast<unsigned int>(jmax_size_amplification_percent);
* Signature: (J)I
*/
jint Java_org_rocksdb_CompactionOptionsUniversal_maxSizeAmplificationPercent(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return static_cast<jint>(opt->max_size_amplification_percent);
}
* Signature: (JI)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setCompressionSizePercent(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jint jcompression_size_percent) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->compression_size_percent =
* Signature: (J)I
*/
jint Java_org_rocksdb_CompactionOptionsUniversal_compressionSizePercent(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return static_cast<jint>(opt->compression_size_percent);
}
* Signature: (JB)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setStopStyle(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jbyte jstop_style_value) {
+ JNIEnv*, jobject, jlong jhandle, jbyte jstop_style_value) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->stop_style = rocksdb::CompactionStopStyleJni::toCppCompactionStopStyle(
jstop_style_value);
* Method: stopStyle
* Signature: (J)B
*/
-jbyte Java_org_rocksdb_CompactionOptionsUniversal_stopStyle(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jbyte Java_org_rocksdb_CompactionOptionsUniversal_stopStyle(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return rocksdb::CompactionStopStyleJni::toJavaCompactionStopStyle(
opt->stop_style);
* Signature: (JZ)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_setAllowTrivialMove(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jallow_trivial_move) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_trivial_move) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
opt->allow_trivial_move = static_cast<bool>(jallow_trivial_move);
}
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_CompactionOptionsUniversal_allowTrivialMove(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
return opt->allow_trivial_move;
}
* Signature: (J)V
*/
void Java_org_rocksdb_CompactionOptionsUniversal_disposeInternal(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
delete reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(jhandle);
}
* Signature: ()J
*/
jlong Java_org_rocksdb_CompressionOptions_newCompressionOptions(
- JNIEnv* /*env*/, jclass /*jcls*/) {
+ JNIEnv*, jclass) {
const auto* opt = new rocksdb::CompressionOptions();
return reinterpret_cast<jlong>(opt);
}
* Method: setWindowBits
* Signature: (JI)V
*/
-void Java_org_rocksdb_CompressionOptions_setWindowBits(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jint jwindow_bits) {
+void Java_org_rocksdb_CompressionOptions_setWindowBits(
+ JNIEnv*, jobject, jlong jhandle, jint jwindow_bits) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
opt->window_bits = static_cast<int>(jwindow_bits);
}
* Method: windowBits
* Signature: (J)I
*/
-jint Java_org_rocksdb_CompressionOptions_windowBits(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_CompressionOptions_windowBits(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
return static_cast<jint>(opt->window_bits);
}
* Method: setLevel
* Signature: (JI)V
*/
-void Java_org_rocksdb_CompressionOptions_setLevel(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle, jint jlevel) {
+void Java_org_rocksdb_CompressionOptions_setLevel(
+ JNIEnv*, jobject, jlong jhandle, jint jlevel) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
opt->level = static_cast<int>(jlevel);
}
* Method: level
* Signature: (J)I
*/
-jint Java_org_rocksdb_CompressionOptions_level(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_CompressionOptions_level(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
return static_cast<jint>(opt->level);
}
* Method: setStrategy
* Signature: (JI)V
*/
-void Java_org_rocksdb_CompressionOptions_setStrategy(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jint jstrategy) {
+void Java_org_rocksdb_CompressionOptions_setStrategy(
+ JNIEnv*, jobject, jlong jhandle, jint jstrategy) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
opt->strategy = static_cast<int>(jstrategy);
}
* Method: strategy
* Signature: (J)I
*/
-jint Java_org_rocksdb_CompressionOptions_strategy(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_CompressionOptions_strategy(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
return static_cast<jint>(opt->strategy);
}
* Method: setMaxDictBytes
* Signature: (JI)V
*/
-void Java_org_rocksdb_CompressionOptions_setMaxDictBytes(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jint jmax_dict_bytes) {
+void Java_org_rocksdb_CompressionOptions_setMaxDictBytes(
+ JNIEnv*, jobject, jlong jhandle, jint jmax_dict_bytes) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
- opt->max_dict_bytes = static_cast<int>(jmax_dict_bytes);
+ opt->max_dict_bytes = static_cast<uint32_t>(jmax_dict_bytes);
}
/*
* Method: maxDictBytes
* Signature: (J)I
*/
-jint Java_org_rocksdb_CompressionOptions_maxDictBytes(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_CompressionOptions_maxDictBytes(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
return static_cast<jint>(opt->max_dict_bytes);
}
/*
* Class: org_rocksdb_CompressionOptions
- * Method: setEnabled
+ * Method: setZstdMaxTrainBytes
* Signature: (JI)V
*/
-void Java_org_rocksdb_CompressionOptions_setEnabled(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean jenabled) {
+void Java_org_rocksdb_CompressionOptions_setZstdMaxTrainBytes(
+ JNIEnv*, jobject, jlong jhandle, jint jzstd_max_train_bytes) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
- opt->enabled = static_cast<int>(jenabled);
+ opt->zstd_max_train_bytes = static_cast<uint32_t>(jzstd_max_train_bytes);
}
/*
* Class: org_rocksdb_CompressionOptions
- * Method: Enabled
+ * Method: zstdMaxTrainBytes
* Signature: (J)I
*/
-jint Java_org_rocksdb_CompressionOptions_enabled(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_CompressionOptions_zstdMaxTrainBytes(
+ JNIEnv *, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
+ return static_cast<jint>(opt->zstd_max_train_bytes);
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: setEnabled
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_CompressionOptions_setEnabled(
+ JNIEnv*, jobject, jlong jhandle, jboolean jenabled) {
+ auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
+ opt->enabled = jenabled == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_CompressionOptions
+ * Method: enabled
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_CompressionOptions_enabled(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
- return static_cast<jint>(opt->enabled);
+ return static_cast<bool>(opt->enabled);
}
/*
* Class: org_rocksdb_CompressionOptions
* Method: disposeInternal
* Signature: (J)V
*/
-void Java_org_rocksdb_CompressionOptions_disposeInternal(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+void Java_org_rocksdb_CompressionOptions_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
delete reinterpret_cast<rocksdb::CompressionOptions*>(jhandle);
}
// This file implements the "bridge" between Java and C++ and enables
// calling c++ rocksdb::Env methods from Java side.
+#include <jni.h>
+#include <vector>
+
+#include "portal.h"
#include "rocksdb/env.h"
#include "include/org_rocksdb_Env.h"
+#include "include/org_rocksdb_HdfsEnv.h"
#include "include/org_rocksdb_RocksEnv.h"
#include "include/org_rocksdb_RocksMemEnv.h"
+#include "include/org_rocksdb_TimedEnv.h"
/*
* Class: org_rocksdb_Env
* Method: getDefaultEnvInternal
* Signature: ()J
*/
-jlong Java_org_rocksdb_Env_getDefaultEnvInternal(JNIEnv* /*env*/,
- jclass /*jclazz*/) {
+jlong Java_org_rocksdb_Env_getDefaultEnvInternal(
+ JNIEnv*, jclass) {
return reinterpret_cast<jlong>(rocksdb::Env::Default());
}
+/*
+ * Class: org_rocksdb_RocksEnv
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksEnv_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* e = reinterpret_cast<rocksdb::Env*>(jhandle);
+ assert(e != nullptr);
+ delete e;
+}
+
/*
* Class: org_rocksdb_Env
* Method: setBackgroundThreads
- * Signature: (JII)V
+ * Signature: (JIB)V
*/
-void Java_org_rocksdb_Env_setBackgroundThreads(JNIEnv* /*env*/,
- jobject /*jobj*/, jlong jhandle,
- jint num, jint priority) {
+void Java_org_rocksdb_Env_setBackgroundThreads(
+ JNIEnv*, jobject, jlong jhandle, jint jnum, jbyte jpriority_value) {
auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
- switch (priority) {
- case org_rocksdb_Env_FLUSH_POOL:
- rocks_env->SetBackgroundThreads(num, rocksdb::Env::Priority::LOW);
- break;
- case org_rocksdb_Env_COMPACTION_POOL:
- rocks_env->SetBackgroundThreads(num, rocksdb::Env::Priority::HIGH);
- break;
- }
+ rocks_env->SetBackgroundThreads(static_cast<int>(jnum),
+ rocksdb::PriorityJni::toCppPriority(jpriority_value));
}
/*
- * Class: org_rocksdb_sEnv
+ * Class: org_rocksdb_Env
+ * Method: getBackgroundThreads
+ * Signature: (JB)I
+ */
+jint Java_org_rocksdb_Env_getBackgroundThreads(
+ JNIEnv*, jobject, jlong jhandle, jbyte jpriority_value) {
+ auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
+ const int num = rocks_env->GetBackgroundThreads(
+ rocksdb::PriorityJni::toCppPriority(jpriority_value));
+ return static_cast<jint>(num);
+}
+
+/*
+ * Class: org_rocksdb_Env
* Method: getThreadPoolQueueLen
- * Signature: (JI)I
+ * Signature: (JB)I
+ */
+jint Java_org_rocksdb_Env_getThreadPoolQueueLen(
+ JNIEnv*, jobject, jlong jhandle, jbyte jpriority_value) {
+ auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
+ const int queue_len = rocks_env->GetThreadPoolQueueLen(
+ rocksdb::PriorityJni::toCppPriority(jpriority_value));
+ return static_cast<jint>(queue_len);
+}
+
+/*
+ * Class: org_rocksdb_Env
+ * Method: incBackgroundThreadsIfNeeded
+ * Signature: (JIB)V
+ */
+void Java_org_rocksdb_Env_incBackgroundThreadsIfNeeded(
+ JNIEnv*, jobject, jlong jhandle, jint jnum, jbyte jpriority_value) {
+ auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
+ rocks_env->IncBackgroundThreadsIfNeeded(static_cast<int>(jnum),
+ rocksdb::PriorityJni::toCppPriority(jpriority_value));
+}
+
+/*
+ * Class: org_rocksdb_Env
+ * Method: lowerThreadPoolIOPriority
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Env_lowerThreadPoolIOPriority(
+ JNIEnv*, jobject, jlong jhandle, jbyte jpriority_value) {
+ auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
+ rocks_env->LowerThreadPoolIOPriority(
+ rocksdb::PriorityJni::toCppPriority(jpriority_value));
+}
+
+/*
+ * Class: org_rocksdb_Env
+ * Method: lowerThreadPoolCPUPriority
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Env_lowerThreadPoolCPUPriority(
+ JNIEnv*, jobject, jlong jhandle, jbyte jpriority_value) {
+ auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
+ rocks_env->LowerThreadPoolCPUPriority(
+ rocksdb::PriorityJni::toCppPriority(jpriority_value));
+}
+
+/*
+ * Class: org_rocksdb_Env
+ * Method: getThreadList
+ * Signature: (J)[Lorg/rocksdb/ThreadStatus;
*/
-jint Java_org_rocksdb_Env_getThreadPoolQueueLen(JNIEnv* /*env*/,
- jobject /*jobj*/, jlong jhandle,
- jint pool_id) {
+jobjectArray Java_org_rocksdb_Env_getThreadList(
+ JNIEnv* env, jobject, jlong jhandle) {
auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jhandle);
- switch (pool_id) {
- case org_rocksdb_RocksEnv_FLUSH_POOL:
- return rocks_env->GetThreadPoolQueueLen(rocksdb::Env::Priority::LOW);
- case org_rocksdb_RocksEnv_COMPACTION_POOL:
- return rocks_env->GetThreadPoolQueueLen(rocksdb::Env::Priority::HIGH);
+ std::vector<rocksdb::ThreadStatus> thread_status;
+ rocksdb::Status s = rocks_env->GetThreadList(&thread_status);
+ if (!s.ok()) {
+ // error, throw exception
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
+
+ // object[]
+ const jsize len = static_cast<jsize>(thread_status.size());
+ jobjectArray jthread_status =
+ env->NewObjectArray(len, rocksdb::ThreadStatusJni::getJClass(env), nullptr);
+ if (jthread_status == nullptr) {
+ // an exception occurred
+ return nullptr;
+ }
+ for (jsize i = 0; i < len; ++i) {
+ jobject jts =
+ rocksdb::ThreadStatusJni::construct(env, &(thread_status[i]));
+ env->SetObjectArrayElement(jthread_status, i, jts);
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ env->DeleteLocalRef(jthread_status);
+ return nullptr;
+ }
}
- return 0;
+
+ return jthread_status;
}
/*
* Class: org_rocksdb_RocksMemEnv
* Method: createMemEnv
- * Signature: ()J
+ * Signature: (J)J
*/
-jlong Java_org_rocksdb_RocksMemEnv_createMemEnv(JNIEnv* /*env*/,
- jclass /*jclazz*/) {
- return reinterpret_cast<jlong>(rocksdb::NewMemEnv(rocksdb::Env::Default()));
+jlong Java_org_rocksdb_RocksMemEnv_createMemEnv(
+ JNIEnv*, jclass, jlong jbase_env_handle) {
+ auto* base_env = reinterpret_cast<rocksdb::Env*>(jbase_env_handle);
+ return reinterpret_cast<jlong>(rocksdb::NewMemEnv(base_env));
}
/*
* Method: disposeInternal
* Signature: (J)V
*/
-void Java_org_rocksdb_RocksMemEnv_disposeInternal(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+void Java_org_rocksdb_RocksMemEnv_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* e = reinterpret_cast<rocksdb::Env*>(jhandle);
+ assert(e != nullptr);
+ delete e;
+}
+
+/*
+ * Class: org_rocksdb_HdfsEnv
+ * Method: createHdfsEnv
+ * Signature: (Ljava/lang/String;)J
+ */
+jlong Java_org_rocksdb_HdfsEnv_createHdfsEnv(
+ JNIEnv* env, jclass, jstring jfsname) {
+ jboolean has_exception = JNI_FALSE;
+ auto fsname = rocksdb::JniUtil::copyStdString(env, jfsname, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return 0;
+ }
+ rocksdb::Env* hdfs_env;
+ rocksdb::Status s = rocksdb::NewHdfsEnv(&hdfs_env, fsname);
+ if (!s.ok()) {
+ // error occurred
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ return 0;
+ }
+ return reinterpret_cast<jlong>(hdfs_env);
+}
+
+/*
+ * Class: org_rocksdb_HdfsEnv
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_HdfsEnv_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
auto* e = reinterpret_cast<rocksdb::Env*>(jhandle);
assert(e != nullptr);
delete e;
}
+
+/*
+ * Class: org_rocksdb_TimedEnv
+ * Method: createTimedEnv
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_TimedEnv_createTimedEnv(
+ JNIEnv*, jclass, jlong jbase_env_handle) {
+ auto* base_env = reinterpret_cast<rocksdb::Env*>(jbase_env_handle);
+ return reinterpret_cast<jlong>(rocksdb::NewTimedEnv(base_env));
+}
+
+/*
+ * Class: org_rocksdb_TimedEnv
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_TimedEnv_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* e = reinterpret_cast<rocksdb::Env*>(jhandle);
+ assert(e != nullptr);
+ delete e;
+}
+
* Method: newEnvOptions
* Signature: ()J
*/
-jlong Java_org_rocksdb_EnvOptions_newEnvOptions(JNIEnv * /*env*/,
- jclass /*jcls*/) {
+jlong Java_org_rocksdb_EnvOptions_newEnvOptions__(
+ JNIEnv*, jclass) {
auto *env_opt = new rocksdb::EnvOptions();
return reinterpret_cast<jlong>(env_opt);
}
+/*
+ * Class: org_rocksdb_EnvOptions
+ * Method: newEnvOptions
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_EnvOptions_newEnvOptions__J(
+ JNIEnv*, jclass, jlong jdboptions_handle) {
+ auto* db_options =
+ reinterpret_cast<rocksdb::DBOptions*>(jdboptions_handle);
+ auto* env_opt = new rocksdb::EnvOptions(*db_options);
+ return reinterpret_cast<jlong>(env_opt);
+}
+
/*
* Class: org_rocksdb_EnvOptions
* Method: disposeInternal
* Signature: (J)V
*/
-void Java_org_rocksdb_EnvOptions_disposeInternal(JNIEnv * /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+void Java_org_rocksdb_EnvOptions_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
auto *eo = reinterpret_cast<rocksdb::EnvOptions *>(jhandle);
assert(eo != nullptr);
delete eo;
/*
* Class: org_rocksdb_EnvOptions
- * Method: setUseDirectReads
+ * Method: setUseMmapReads
* Signature: (JZ)V
*/
-void Java_org_rocksdb_EnvOptions_setUseDirectReads(JNIEnv * /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean use_direct_reads) {
- ENV_OPTIONS_SET_BOOL(jhandle, use_direct_reads);
+void Java_org_rocksdb_EnvOptions_setUseMmapReads(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_mmap_reads) {
+ ENV_OPTIONS_SET_BOOL(jhandle, use_mmap_reads);
}
/*
* Class: org_rocksdb_EnvOptions
- * Method: useDirectReads
+ * Method: useMmapReads
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_EnvOptions_useDirectReads(JNIEnv * /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
- return ENV_OPTIONS_GET(jhandle, use_direct_reads);
+jboolean Java_org_rocksdb_EnvOptions_useMmapReads(
+ JNIEnv*, jobject, jlong jhandle) {
+ return ENV_OPTIONS_GET(jhandle, use_mmap_reads);
}
/*
* Class: org_rocksdb_EnvOptions
- * Method: setUseDirectWrites
+ * Method: setUseMmapWrites
* Signature: (JZ)V
*/
-void Java_org_rocksdb_EnvOptions_setUseDirectWrites(
- JNIEnv * /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean use_direct_writes) {
- ENV_OPTIONS_SET_BOOL(jhandle, use_direct_writes);
+void Java_org_rocksdb_EnvOptions_setUseMmapWrites(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_mmap_writes) {
+ ENV_OPTIONS_SET_BOOL(jhandle, use_mmap_writes);
}
/*
* Class: org_rocksdb_EnvOptions
- * Method: useDirectWrites
+ * Method: useMmapWrites
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_EnvOptions_useDirectWrites(JNIEnv * /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
- return ENV_OPTIONS_GET(jhandle, use_direct_writes);
+jboolean Java_org_rocksdb_EnvOptions_useMmapWrites(
+ JNIEnv*, jobject, jlong jhandle) {
+ return ENV_OPTIONS_GET(jhandle, use_mmap_writes);
}
/*
* Class: org_rocksdb_EnvOptions
- * Method: setUseMmapReads
+ * Method: setUseDirectReads
* Signature: (JZ)V
*/
-void Java_org_rocksdb_EnvOptions_setUseMmapReads(JNIEnv * /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean use_mmap_reads) {
- ENV_OPTIONS_SET_BOOL(jhandle, use_mmap_reads);
+void Java_org_rocksdb_EnvOptions_setUseDirectReads(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_direct_reads) {
+ ENV_OPTIONS_SET_BOOL(jhandle, use_direct_reads);
}
/*
* Class: org_rocksdb_EnvOptions
- * Method: useMmapReads
+ * Method: useDirectReads
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_EnvOptions_useMmapReads(JNIEnv * /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
- return ENV_OPTIONS_GET(jhandle, use_mmap_reads);
+jboolean Java_org_rocksdb_EnvOptions_useDirectReads(
+ JNIEnv*, jobject, jlong jhandle) {
+ return ENV_OPTIONS_GET(jhandle, use_direct_reads);
}
/*
* Class: org_rocksdb_EnvOptions
- * Method: setUseMmapWrites
+ * Method: setUseDirectWrites
* Signature: (JZ)V
*/
-void Java_org_rocksdb_EnvOptions_setUseMmapWrites(JNIEnv * /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean use_mmap_writes) {
- ENV_OPTIONS_SET_BOOL(jhandle, use_mmap_writes);
+void Java_org_rocksdb_EnvOptions_setUseDirectWrites(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_direct_writes) {
+ ENV_OPTIONS_SET_BOOL(jhandle, use_direct_writes);
}
/*
* Class: org_rocksdb_EnvOptions
- * Method: useMmapWrites
+ * Method: useDirectWrites
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_EnvOptions_useMmapWrites(JNIEnv * /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
- return ENV_OPTIONS_GET(jhandle, use_mmap_writes);
+jboolean Java_org_rocksdb_EnvOptions_useDirectWrites(
+ JNIEnv*, jobject, jlong jhandle) {
+ return ENV_OPTIONS_GET(jhandle, use_direct_writes);
}
/*
* Method: setAllowFallocate
* Signature: (JZ)V
*/
-void Java_org_rocksdb_EnvOptions_setAllowFallocate(JNIEnv * /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean allow_fallocate) {
+void Java_org_rocksdb_EnvOptions_setAllowFallocate(
+ JNIEnv*, jobject, jlong jhandle, jboolean allow_fallocate) {
ENV_OPTIONS_SET_BOOL(jhandle, allow_fallocate);
}
* Method: allowFallocate
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_EnvOptions_allowFallocate(JNIEnv * /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_EnvOptions_allowFallocate(
+ JNIEnv*, jobject, jlong jhandle) {
return ENV_OPTIONS_GET(jhandle, allow_fallocate);
}
* Method: setSetFdCloexec
* Signature: (JZ)V
*/
-void Java_org_rocksdb_EnvOptions_setSetFdCloexec(JNIEnv * /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean set_fd_cloexec) {
+void Java_org_rocksdb_EnvOptions_setSetFdCloexec(
+ JNIEnv*, jobject, jlong jhandle, jboolean set_fd_cloexec) {
ENV_OPTIONS_SET_BOOL(jhandle, set_fd_cloexec);
}
* Method: setFdCloexec
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_EnvOptions_setFdCloexec(JNIEnv * /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_EnvOptions_setFdCloexec(
+ JNIEnv*, jobject, jlong jhandle) {
return ENV_OPTIONS_GET(jhandle, set_fd_cloexec);
}
* Method: setBytesPerSync
* Signature: (JJ)V
*/
-void Java_org_rocksdb_EnvOptions_setBytesPerSync(JNIEnv * /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jlong bytes_per_sync) {
+void Java_org_rocksdb_EnvOptions_setBytesPerSync(
+ JNIEnv*, jobject, jlong jhandle, jlong bytes_per_sync) {
ENV_OPTIONS_SET_UINT64_T(jhandle, bytes_per_sync);
}
* Method: bytesPerSync
* Signature: (J)J
*/
-jlong Java_org_rocksdb_EnvOptions_bytesPerSync(JNIEnv * /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_EnvOptions_bytesPerSync(
+ JNIEnv*, jobject, jlong jhandle) {
return ENV_OPTIONS_GET(jhandle, bytes_per_sync);
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_EnvOptions_setFallocateWithKeepSize(
- JNIEnv * /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean fallocate_with_keep_size) {
+ JNIEnv*, jobject, jlong jhandle, jboolean fallocate_with_keep_size) {
ENV_OPTIONS_SET_BOOL(jhandle, fallocate_with_keep_size);
}
* Method: fallocateWithKeepSize
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_EnvOptions_fallocateWithKeepSize(JNIEnv * /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_EnvOptions_fallocateWithKeepSize(
+ JNIEnv*, jobject, jlong jhandle) {
return ENV_OPTIONS_GET(jhandle, fallocate_with_keep_size);
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_EnvOptions_setCompactionReadaheadSize(
- JNIEnv * /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong compaction_readahead_size) {
+ JNIEnv*, jobject, jlong jhandle, jlong compaction_readahead_size) {
ENV_OPTIONS_SET_SIZE_T(jhandle, compaction_readahead_size);
}
* Method: compactionReadaheadSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_EnvOptions_compactionReadaheadSize(JNIEnv * /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_EnvOptions_compactionReadaheadSize(
+ JNIEnv*, jobject, jlong jhandle) {
return ENV_OPTIONS_GET(jhandle, compaction_readahead_size);
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_EnvOptions_setRandomAccessMaxBufferSize(
- JNIEnv * /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong random_access_max_buffer_size) {
+ JNIEnv*, jobject, jlong jhandle, jlong random_access_max_buffer_size) {
ENV_OPTIONS_SET_SIZE_T(jhandle, random_access_max_buffer_size);
}
* Method: randomAccessMaxBufferSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_EnvOptions_randomAccessMaxBufferSize(JNIEnv * /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_EnvOptions_randomAccessMaxBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
return ENV_OPTIONS_GET(jhandle, random_access_max_buffer_size);
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_EnvOptions_setWritableFileMaxBufferSize(
- JNIEnv * /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong writable_file_max_buffer_size) {
+ JNIEnv*, jobject, jlong jhandle, jlong writable_file_max_buffer_size) {
ENV_OPTIONS_SET_SIZE_T(jhandle, writable_file_max_buffer_size);
}
* Method: writableFileMaxBufferSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_EnvOptions_writableFileMaxBufferSize(JNIEnv * /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_EnvOptions_writableFileMaxBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
return ENV_OPTIONS_GET(jhandle, writable_file_max_buffer_size);
}
* Method: setRateLimiter
* Signature: (JJ)V
*/
-void Java_org_rocksdb_EnvOptions_setRateLimiter(JNIEnv * /*env*/,
- jobject /*jobj*/, jlong jhandle,
- jlong rl_handle) {
+void Java_org_rocksdb_EnvOptions_setRateLimiter(
+ JNIEnv*, jobject, jlong jhandle, jlong rl_handle) {
auto *sptr_rate_limiter =
reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter> *>(rl_handle);
auto *env_opt = reinterpret_cast<rocksdb::EnvOptions *>(jhandle);
* Signature: ()J
*/
jlong Java_org_rocksdb_IngestExternalFileOptions_newIngestExternalFileOptions__(
- JNIEnv* /*env*/, jclass /*jclazz*/) {
+ JNIEnv*, jclass) {
auto* options = new rocksdb::IngestExternalFileOptions();
return reinterpret_cast<jlong>(options);
}
* Signature: (ZZZZ)J
*/
jlong Java_org_rocksdb_IngestExternalFileOptions_newIngestExternalFileOptions__ZZZZ(
- JNIEnv* /*env*/, jclass /*jcls*/, jboolean jmove_files,
+ JNIEnv*, jclass, jboolean jmove_files,
jboolean jsnapshot_consistency, jboolean jallow_global_seqno,
jboolean jallow_blocking_flush) {
auto* options = new rocksdb::IngestExternalFileOptions();
* Method: moveFiles
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_IngestExternalFileOptions_moveFiles(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_IngestExternalFileOptions_moveFiles(
+ JNIEnv*, jobject, jlong jhandle) {
auto* options =
reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
return static_cast<jboolean>(options->move_files);
* Signature: (JZ)V
*/
void Java_org_rocksdb_IngestExternalFileOptions_setMoveFiles(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jboolean jmove_files) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jmove_files) {
auto* options =
reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
options->move_files = static_cast<bool>(jmove_files);
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_IngestExternalFileOptions_snapshotConsistency(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
auto* options =
reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
return static_cast<jboolean>(options->snapshot_consistency);
* Signature: (JZ)V
*/
void Java_org_rocksdb_IngestExternalFileOptions_setSnapshotConsistency(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jsnapshot_consistency) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jsnapshot_consistency) {
auto* options =
reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
options->snapshot_consistency = static_cast<bool>(jsnapshot_consistency);
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_IngestExternalFileOptions_allowGlobalSeqNo(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
auto* options =
reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
return static_cast<jboolean>(options->allow_global_seqno);
* Signature: (JZ)V
*/
void Java_org_rocksdb_IngestExternalFileOptions_setAllowGlobalSeqNo(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jallow_global_seqno) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_global_seqno) {
auto* options =
reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
options->allow_global_seqno = static_cast<bool>(jallow_global_seqno);
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_IngestExternalFileOptions_allowBlockingFlush(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
auto* options =
reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
return static_cast<jboolean>(options->allow_blocking_flush);
* Signature: (JZ)V
*/
void Java_org_rocksdb_IngestExternalFileOptions_setAllowBlockingFlush(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jallow_blocking_flush) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_blocking_flush) {
auto* options =
reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
options->allow_blocking_flush = static_cast<bool>(jallow_blocking_flush);
}
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: ingestBehind
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_IngestExternalFileOptions_ingestBehind(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* options =
+ reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
+ return options->ingest_behind == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: setIngestBehind
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_IngestExternalFileOptions_setIngestBehind(
+ JNIEnv*, jobject, jlong jhandle, jboolean jingest_behind) {
+ auto* options =
+ reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
+ options->ingest_behind = jingest_behind == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: writeGlobalSeqno
+ * Signature: (J)Z
+ */
+JNIEXPORT jboolean JNICALL Java_org_rocksdb_IngestExternalFileOptions_writeGlobalSeqno(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* options =
+ reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
+ return options->write_global_seqno == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_IngestExternalFileOptions
+ * Method: setWriteGlobalSeqno
+ * Signature: (JZ)V
+ */
+JNIEXPORT void JNICALL Java_org_rocksdb_IngestExternalFileOptions_setWriteGlobalSeqno(
+ JNIEnv*, jobject, jlong jhandle, jboolean jwrite_global_seqno) {
+ auto* options =
+ reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
+ options->write_global_seqno = jwrite_global_seqno == JNI_TRUE;
+}
+
/*
* Class: org_rocksdb_IngestExternalFileOptions
* Method: disposeInternal
* Signature: (J)V
*/
void Java_org_rocksdb_IngestExternalFileOptions_disposeInternal(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
auto* options =
reinterpret_cast<rocksdb::IngestExternalFileOptions*>(jhandle);
delete options;
- // @lint-ignore TXT4 T25377293 Grandfathered in
}
\ No newline at end of file
// exception occurred
return nullptr;
}
- const rocksdb::HashMapJni::FnMapKV<const rocksdb::MemoryUtil::UsageType, const uint64_t>
+ const rocksdb::HashMapJni::FnMapKV<const rocksdb::MemoryUtil::UsageType, const uint64_t, jobject, jobject>
fn_map_kv =
[env](const std::pair<rocksdb::MemoryUtil::UsageType, uint64_t>& pair) {
// Construct key
jlong Java_org_rocksdb_HashSkipListMemTableConfig_newMemTableFactoryHandle(
JNIEnv* env, jobject /*jobj*/, jlong jbucket_count, jint jheight,
jint jbranching_factor) {
- rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jbucket_count);
+ rocksdb::Status s = rocksdb::JniUtil::check_if_jlong_fits_size_t(jbucket_count);
if (s.ok()) {
return reinterpret_cast<jlong>(rocksdb::NewHashSkipListRepFactory(
static_cast<size_t>(jbucket_count), static_cast<int32_t>(jheight),
jlong jhuge_page_tlb_size, jint jbucket_entries_logging_threshold,
jboolean jif_log_bucket_dist_when_flash, jint jthreshold_use_skiplist) {
rocksdb::Status statusBucketCount =
- rocksdb::check_if_jlong_fits_size_t(jbucket_count);
+ rocksdb::JniUtil::check_if_jlong_fits_size_t(jbucket_count);
rocksdb::Status statusHugePageTlb =
- rocksdb::check_if_jlong_fits_size_t(jhuge_page_tlb_size);
+ rocksdb::JniUtil::check_if_jlong_fits_size_t(jhuge_page_tlb_size);
if (statusBucketCount.ok() && statusHugePageTlb.ok()) {
return reinterpret_cast<jlong>(rocksdb::NewHashLinkListRepFactory(
static_cast<size_t>(jbucket_count),
*/
jlong Java_org_rocksdb_VectorMemTableConfig_newMemTableFactoryHandle(
JNIEnv* env, jobject /*jobj*/, jlong jreserved_size) {
- rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jreserved_size);
+ rocksdb::Status s = rocksdb::JniUtil::check_if_jlong_fits_size_t(jreserved_size);
if (s.ok()) {
return reinterpret_cast<jlong>(
new rocksdb::VectorRepFactory(static_cast<size_t>(jreserved_size)));
*/
jlong Java_org_rocksdb_SkipListMemTableConfig_newMemTableFactoryHandle0(
JNIEnv* env, jobject /*jobj*/, jlong jlookahead) {
- rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jlookahead);
+ rocksdb::Status s = rocksdb::JniUtil::check_if_jlong_fits_size_t(jlookahead);
if (s.ok()) {
return reinterpret_cast<jlong>(
new rocksdb::SkipListFactory(static_cast<size_t>(jlookahead)));
* Signature: (JLjava/lang/String;)J
*/
jlong Java_org_rocksdb_OptimisticTransactionDB_open__JLjava_lang_String_2(
- JNIEnv* env, jclass /*jcls*/, jlong joptions_handle, jstring jdb_path) {
+ JNIEnv* env, jclass, jlong joptions_handle, jstring jdb_path) {
const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
if (db_path == nullptr) {
// exception thrown: OutOfMemoryError
*/
jlongArray
Java_org_rocksdb_OptimisticTransactionDB_open__JLjava_lang_String_2_3_3B_3J(
- JNIEnv* env, jclass /*jcls*/, jlong jdb_options_handle, jstring jdb_path,
+ JNIEnv* env, jclass, jlong jdb_options_handle, jstring jdb_path,
jobjectArray jcolumn_names, jlongArray jcolumn_options_handles) {
const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
if (db_path == nullptr) {
return nullptr;
}
+/*
+ * Class: org_rocksdb_OptimisticTransactionDB
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_OptimisticTransactionDB_disposeInternal(
+ JNIEnv *, jobject, jlong jhandle) {
+ auto* optimistic_txn_db =
+ reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
+ assert(optimistic_txn_db != nullptr);
+ delete optimistic_txn_db;
+}
+
+/*
+ * Class: org_rocksdb_OptimisticTransactionDB
+ * Method: closeDatabase
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_OptimisticTransactionDB_closeDatabase(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* optimistic_txn_db =
+ reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
+ assert(optimistic_txn_db != nullptr);
+ rocksdb::Status s = optimistic_txn_db->Close();
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
/*
* Class: org_rocksdb_OptimisticTransactionDB
* Method: beginTransaction
* Signature: (JJ)J
*/
jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction__JJ(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jwrite_options_handle) {
+ JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle) {
auto* optimistic_txn_db =
reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
auto* write_options =
* Signature: (JJJ)J
*/
jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction_1withOld__JJJ(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jwrite_options_handle, jlong jold_txn_handle) {
+ JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle,
+ jlong jold_txn_handle) {
auto* optimistic_txn_db =
reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
auto* write_options =
* Signature: (JJJJ)J
*/
jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction_1withOld__JJJJ(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jwrite_options_handle, jlong joptimistic_txn_options_handle,
- jlong jold_txn_handle) {
+ JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle,
+ jlong joptimistic_txn_options_handle, jlong jold_txn_handle) {
auto* optimistic_txn_db =
reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
auto* write_options =
* Method: getBaseDB
* Signature: (J)J
*/
-jlong Java_org_rocksdb_OptimisticTransactionDB_getBaseDB(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_OptimisticTransactionDB_getBaseDB(
+ JNIEnv*, jobject, jlong jhandle) {
auto* optimistic_txn_db =
reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
return reinterpret_cast<jlong>(optimistic_txn_db->GetBaseDB());
}
-
-/*
- * Class: org_rocksdb_OptimisticTransactionDB
- * Method: disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_OptimisticTransactionDB_disposeInternal(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
- delete reinterpret_cast<rocksdb::OptimisticTransactionDB*>(jhandle);
-}
#include "rocksjni/comparatorjnicallback.h"
#include "rocksjni/portal.h"
#include "rocksjni/statisticsjni.h"
+#include "rocksjni/table_filter_jnicallback.h"
#include "rocksdb/comparator.h"
#include "rocksdb/convenience.h"
* Method: newOptions
* Signature: ()J
*/
-jlong Java_org_rocksdb_Options_newOptions__(JNIEnv* /*env*/, jclass /*jcls*/) {
+jlong Java_org_rocksdb_Options_newOptions__(
+ JNIEnv*, jclass) {
auto* op = new rocksdb::Options();
return reinterpret_cast<jlong>(op);
}
* Method: newOptions
* Signature: (JJ)J
*/
-jlong Java_org_rocksdb_Options_newOptions__JJ(JNIEnv* /*env*/, jclass /*jcls*/,
- jlong jdboptions,
- jlong jcfoptions) {
+jlong Java_org_rocksdb_Options_newOptions__JJ(
+ JNIEnv*, jclass, jlong jdboptions, jlong jcfoptions) {
auto* dbOpt = reinterpret_cast<const rocksdb::DBOptions*>(jdboptions);
auto* cfOpt =
reinterpret_cast<const rocksdb::ColumnFamilyOptions*>(jcfoptions);
* Method: copyOptions
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_copyOptions(JNIEnv* /*env*/, jclass /*jcls*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_copyOptions(
+ JNIEnv*, jclass, jlong jhandle) {
auto new_opt =
new rocksdb::Options(*(reinterpret_cast<rocksdb::Options*>(jhandle)));
return reinterpret_cast<jlong>(new_opt);
* Method: disposeInternal
* Signature: (J)V
*/
-void Java_org_rocksdb_Options_disposeInternal(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong handle) {
+void Java_org_rocksdb_Options_disposeInternal(
+ JNIEnv*, jobject, jlong handle) {
auto* op = reinterpret_cast<rocksdb::Options*>(handle);
assert(op != nullptr);
delete op;
* Method: setIncreaseParallelism
* Signature: (JI)V
*/
-void Java_org_rocksdb_Options_setIncreaseParallelism(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jint totalThreads) {
+void Java_org_rocksdb_Options_setIncreaseParallelism(
+ JNIEnv*, jobject, jlong jhandle, jint totalThreads) {
reinterpret_cast<rocksdb::Options*>(jhandle)->IncreaseParallelism(
static_cast<int>(totalThreads));
}
* Method: setCreateIfMissing
* Signature: (JZ)V
*/
-void Java_org_rocksdb_Options_setCreateIfMissing(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle, jboolean flag) {
+void Java_org_rocksdb_Options_setCreateIfMissing(
+ JNIEnv*, jobject, jlong jhandle, jboolean flag) {
reinterpret_cast<rocksdb::Options*>(jhandle)->create_if_missing = flag;
}
* Method: createIfMissing
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_createIfMissing(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_createIfMissing(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->create_if_missing;
}
* Method: setCreateMissingColumnFamilies
* Signature: (JZ)V
*/
-void Java_org_rocksdb_Options_setCreateMissingColumnFamilies(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean flag) {
+void Java_org_rocksdb_Options_setCreateMissingColumnFamilies(
+ JNIEnv*, jobject, jlong jhandle, jboolean flag) {
reinterpret_cast<rocksdb::Options*>(jhandle)->create_missing_column_families =
flag;
}
* Method: createMissingColumnFamilies
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_createMissingColumnFamilies(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_createMissingColumnFamilies(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->create_missing_column_families;
}
* Method: setComparatorHandle
* Signature: (JI)V
*/
-void Java_org_rocksdb_Options_setComparatorHandle__JI(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jint builtinComparator) {
+void Java_org_rocksdb_Options_setComparatorHandle__JI(
+ JNIEnv*, jobject, jlong jhandle, jint builtinComparator) {
switch (builtinComparator) {
case 1:
reinterpret_cast<rocksdb::Options*>(jhandle)->comparator =
* Method: setComparatorHandle
* Signature: (JJB)V
*/
-void Java_org_rocksdb_Options_setComparatorHandle__JJB(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jopt_handle,
- jlong jcomparator_handle,
- jbyte jcomparator_type) {
+void Java_org_rocksdb_Options_setComparatorHandle__JJB(
+ JNIEnv*, jobject, jlong jopt_handle, jlong jcomparator_handle,
+ jbyte jcomparator_type) {
rocksdb::Comparator* comparator = nullptr;
switch (jcomparator_type) {
// JAVA_COMPARATOR
* Method: setMergeOperatorName
 * Signature: (JLjava/lang/String;)V
*/
-void Java_org_rocksdb_Options_setMergeOperatorName(JNIEnv* env,
- jobject /*jobj*/,
- jlong jhandle,
- jstring jop_name) {
+void Java_org_rocksdb_Options_setMergeOperatorName(
+ JNIEnv* env, jobject, jlong jhandle, jstring jop_name) {
const char* op_name = env->GetStringUTFChars(jop_name, nullptr);
if (op_name == nullptr) {
// exception thrown: OutOfMemoryError
* Method: setMergeOperator
 * Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setMergeOperator(JNIEnv* /*env*/,
- jobject /*jobj*/, jlong jhandle,
- jlong mergeOperatorHandle) {
+void Java_org_rocksdb_Options_setMergeOperator(
+ JNIEnv*, jobject, jlong jhandle, jlong mergeOperatorHandle) {
reinterpret_cast<rocksdb::Options*>(jhandle)->merge_operator =
*(reinterpret_cast<std::shared_ptr<rocksdb::MergeOperator>*>(
mergeOperatorHandle));
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setCompactionFilterHandle(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jopt_handle,
+ JNIEnv*, jobject, jlong jopt_handle,
jlong jcompactionfilter_handle) {
reinterpret_cast<rocksdb::Options*>(jopt_handle)->
compaction_filter = reinterpret_cast<rocksdb::CompactionFilter*>
* Signature: (JJ)V
*/
void JNICALL Java_org_rocksdb_Options_setCompactionFilterFactoryHandle(
- JNIEnv* /* env */, jobject /* jobj */, jlong jopt_handle,
+ JNIEnv*, jobject, jlong jopt_handle,
jlong jcompactionfilterfactory_handle) {
auto* cff_factory =
reinterpret_cast<std::shared_ptr<rocksdb::CompactionFilterFactory> *>(
* Method: setWriteBufferSize
 * Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setWriteBufferSize(JNIEnv* env, jobject /*jobj*/,
- jlong jhandle,
- jlong jwrite_buffer_size) {
- rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jwrite_buffer_size);
+void Java_org_rocksdb_Options_setWriteBufferSize(
+ JNIEnv* env, jobject, jlong jhandle, jlong jwrite_buffer_size) {
+ auto s =
+ rocksdb::JniUtil::check_if_jlong_fits_size_t(jwrite_buffer_size);
if (s.ok()) {
reinterpret_cast<rocksdb::Options*>(jhandle)->write_buffer_size =
jwrite_buffer_size;
* Method: setWriteBufferManager
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setWriteBufferManager(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong joptions_handle,
- jlong jwrite_buffer_manager_handle) {
+void Java_org_rocksdb_Options_setWriteBufferManager(
+ JNIEnv*, jobject, jlong joptions_handle,
+ jlong jwrite_buffer_manager_handle) {
auto* write_buffer_manager =
reinterpret_cast<std::shared_ptr<rocksdb::WriteBufferManager> *>(jwrite_buffer_manager_handle);
reinterpret_cast<rocksdb::Options*>(joptions_handle)->write_buffer_manager =
* Method: writeBufferSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_writeBufferSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_writeBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->write_buffer_size;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setMaxWriteBufferNumber(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jint jmax_write_buffer_number) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_write_buffer_number =
jmax_write_buffer_number;
* Method: setStatistics
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setStatistics(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle,
- jlong jstatistics_handle) {
+void Java_org_rocksdb_Options_setStatistics(
+ JNIEnv*, jobject, jlong jhandle, jlong jstatistics_handle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
auto* pSptr = reinterpret_cast<std::shared_ptr<rocksdb::StatisticsJni>*>(
jstatistics_handle);
* Method: statistics
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_statistics(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_statistics(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
std::shared_ptr<rocksdb::Statistics> sptr = opt->statistics;
if (sptr == nullptr) {
* Method: maxWriteBufferNumber
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_maxWriteBufferNumber(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_maxWriteBufferNumber(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->max_write_buffer_number;
}
* Method: errorIfExists
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_errorIfExists(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_errorIfExists(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->error_if_exists;
}
* Method: setErrorIfExists
* Signature: (JZ)V
*/
-void Java_org_rocksdb_Options_setErrorIfExists(JNIEnv* /*env*/,
- jobject /*jobj*/, jlong jhandle,
- jboolean error_if_exists) {
+void Java_org_rocksdb_Options_setErrorIfExists(
+ JNIEnv*, jobject, jlong jhandle, jboolean error_if_exists) {
reinterpret_cast<rocksdb::Options*>(jhandle)->error_if_exists =
static_cast<bool>(error_if_exists);
}
* Method: paranoidChecks
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_paranoidChecks(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_paranoidChecks(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->paranoid_checks;
}
* Method: setParanoidChecks
* Signature: (JZ)V
*/
-void Java_org_rocksdb_Options_setParanoidChecks(JNIEnv* /*env*/,
- jobject /*jobj*/, jlong jhandle,
- jboolean paranoid_checks) {
+void Java_org_rocksdb_Options_setParanoidChecks(
+ JNIEnv*, jobject, jlong jhandle, jboolean paranoid_checks) {
reinterpret_cast<rocksdb::Options*>(jhandle)->paranoid_checks =
static_cast<bool>(paranoid_checks);
}
* Method: setEnv
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setEnv(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle, jlong jenv) {
+void Java_org_rocksdb_Options_setEnv(
+ JNIEnv*, jobject, jlong jhandle, jlong jenv) {
reinterpret_cast<rocksdb::Options*>(jhandle)->env =
reinterpret_cast<rocksdb::Env*>(jenv);
}
* Method: setMaxTotalWalSize
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setMaxTotalWalSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jlong jmax_total_wal_size) {
+void Java_org_rocksdb_Options_setMaxTotalWalSize(
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_total_wal_size) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_total_wal_size =
static_cast<jlong>(jmax_total_wal_size);
}
* Method: maxTotalWalSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_maxTotalWalSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_maxTotalWalSize(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->max_total_wal_size;
}
* Method: maxOpenFiles
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_maxOpenFiles(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_maxOpenFiles(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->max_open_files;
}
* Method: setMaxOpenFiles
* Signature: (JI)V
*/
-void Java_org_rocksdb_Options_setMaxOpenFiles(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle,
- jint max_open_files) {
+void Java_org_rocksdb_Options_setMaxOpenFiles(
+ JNIEnv*, jobject, jlong jhandle, jint max_open_files) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_open_files =
static_cast<int>(max_open_files);
}
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setMaxFileOpeningThreads(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint jmax_file_opening_threads) {
+ JNIEnv*, jobject, jlong jhandle, jint jmax_file_opening_threads) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_file_opening_threads =
static_cast<int>(jmax_file_opening_threads);
}
* Method: maxFileOpeningThreads
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_maxFileOpeningThreads(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_maxFileOpeningThreads(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<int>(opt->max_file_opening_threads);
}
* Method: useFsync
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_useFsync(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_useFsync(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->use_fsync;
}
* Method: setUseFsync
* Signature: (JZ)V
*/
-void Java_org_rocksdb_Options_setUseFsync(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle, jboolean use_fsync) {
+void Java_org_rocksdb_Options_setUseFsync(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_fsync) {
reinterpret_cast<rocksdb::Options*>(jhandle)->use_fsync =
static_cast<bool>(use_fsync);
}
* Method: setDbPaths
* Signature: (J[Ljava/lang/String;[J)V
*/
-void Java_org_rocksdb_Options_setDbPaths(JNIEnv* env, jobject /*jobj*/,
- jlong jhandle, jobjectArray jpaths,
- jlongArray jtarget_sizes) {
+void Java_org_rocksdb_Options_setDbPaths(
+ JNIEnv* env, jobject, jlong jhandle, jobjectArray jpaths,
+ jlongArray jtarget_sizes) {
std::vector<rocksdb::DbPath> db_paths;
jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr);
if (ptr_jtarget_size == nullptr) {
* Method: dbPathsLen
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_dbPathsLen(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_dbPathsLen(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<jlong>(opt->db_paths.size());
}
* Method: dbPaths
* Signature: (J[Ljava/lang/String;[J)V
*/
-void Java_org_rocksdb_Options_dbPaths(JNIEnv* env, jobject /*jobj*/,
- jlong jhandle, jobjectArray jpaths,
- jlongArray jtarget_sizes) {
+void Java_org_rocksdb_Options_dbPaths(
+ JNIEnv* env, jobject, jlong jhandle, jobjectArray jpaths,
+ jlongArray jtarget_sizes) {
jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr);
if (ptr_jtarget_size == nullptr) {
// exception thrown: OutOfMemoryError
* Method: dbLogDir
 * Signature: (J)Ljava/lang/String;
*/
-jstring Java_org_rocksdb_Options_dbLogDir(JNIEnv* env, jobject /*jobj*/,
- jlong jhandle) {
+jstring Java_org_rocksdb_Options_dbLogDir(
+ JNIEnv* env, jobject, jlong jhandle) {
return env->NewStringUTF(
reinterpret_cast<rocksdb::Options*>(jhandle)->db_log_dir.c_str());
}
* Method: setDbLogDir
 * Signature: (JLjava/lang/String;)V
*/
-void Java_org_rocksdb_Options_setDbLogDir(JNIEnv* env, jobject /*jobj*/,
- jlong jhandle, jstring jdb_log_dir) {
+void Java_org_rocksdb_Options_setDbLogDir(
+ JNIEnv* env, jobject, jlong jhandle, jstring jdb_log_dir) {
const char* log_dir = env->GetStringUTFChars(jdb_log_dir, nullptr);
if (log_dir == nullptr) {
// exception thrown: OutOfMemoryError
* Method: walDir
 * Signature: (J)Ljava/lang/String;
*/
-jstring Java_org_rocksdb_Options_walDir(JNIEnv* env, jobject /*jobj*/,
- jlong jhandle) {
+jstring Java_org_rocksdb_Options_walDir(
+ JNIEnv* env, jobject, jlong jhandle) {
return env->NewStringUTF(
reinterpret_cast<rocksdb::Options*>(jhandle)->wal_dir.c_str());
}
* Method: setWalDir
 * Signature: (JLjava/lang/String;)V
*/
-void Java_org_rocksdb_Options_setWalDir(JNIEnv* env, jobject /*jobj*/,
- jlong jhandle, jstring jwal_dir) {
+void Java_org_rocksdb_Options_setWalDir(
+ JNIEnv* env, jobject, jlong jhandle, jstring jwal_dir) {
const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr);
if (wal_dir == nullptr) {
// exception thrown: OutOfMemoryError
* Method: deleteObsoleteFilesPeriodMicros
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_deleteObsoleteFilesPeriodMicros(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_deleteObsoleteFilesPeriodMicros(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->delete_obsolete_files_period_micros;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setDeleteObsoleteFilesPeriodMicros(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jlong micros) {
+ JNIEnv*, jobject, jlong jhandle, jlong micros) {
reinterpret_cast<rocksdb::Options*>(jhandle)
->delete_obsolete_files_period_micros = static_cast<int64_t>(micros);
}
* Method: setBaseBackgroundCompactions
* Signature: (JI)V
*/
-void Java_org_rocksdb_Options_setBaseBackgroundCompactions(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jint max) {
+void Java_org_rocksdb_Options_setBaseBackgroundCompactions(
+ JNIEnv*, jobject, jlong jhandle, jint max) {
reinterpret_cast<rocksdb::Options*>(jhandle)->base_background_compactions =
static_cast<int>(max);
}
* Method: baseBackgroundCompactions
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_baseBackgroundCompactions(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_baseBackgroundCompactions(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->base_background_compactions;
}
* Method: maxBackgroundCompactions
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_maxBackgroundCompactions(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_maxBackgroundCompactions(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->max_background_compactions;
}
* Method: setMaxBackgroundCompactions
* Signature: (JI)V
*/
-void Java_org_rocksdb_Options_setMaxBackgroundCompactions(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jint max) {
+void Java_org_rocksdb_Options_setMaxBackgroundCompactions(
+ JNIEnv*, jobject, jlong jhandle, jint max) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_background_compactions =
static_cast<int>(max);
}
* Method: setMaxSubcompactions
* Signature: (JI)V
*/
-void Java_org_rocksdb_Options_setMaxSubcompactions(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle, jint max) {
+void Java_org_rocksdb_Options_setMaxSubcompactions(
+ JNIEnv*, jobject, jlong jhandle, jint max) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_subcompactions =
static_cast<int32_t>(max);
}
* Method: maxSubcompactions
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_maxSubcompactions(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_maxSubcompactions(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->max_subcompactions;
}
* Method: maxBackgroundFlushes
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_maxBackgroundFlushes(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_maxBackgroundFlushes(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->max_background_flushes;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setMaxBackgroundFlushes(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint max_background_flushes) {
+ JNIEnv*, jobject, jlong jhandle, jint max_background_flushes) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_background_flushes =
static_cast<int>(max_background_flushes);
}
* Method: maxBackgroundJobs
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_maxBackgroundJobs(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_maxBackgroundJobs(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->max_background_jobs;
}
* Method: setMaxBackgroundJobs
* Signature: (JI)V
*/
-void Java_org_rocksdb_Options_setMaxBackgroundJobs(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jint max_background_jobs) {
+void Java_org_rocksdb_Options_setMaxBackgroundJobs(
+ JNIEnv*, jobject, jlong jhandle, jint max_background_jobs) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_background_jobs =
static_cast<int>(max_background_jobs);
}
* Method: maxLogFileSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_maxLogFileSize(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_maxLogFileSize(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->max_log_file_size;
}
* Method: setMaxLogFileSize
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setMaxLogFileSize(JNIEnv* env, jobject /*jobj*/,
- jlong jhandle,
- jlong max_log_file_size) {
- rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(max_log_file_size);
+void Java_org_rocksdb_Options_setMaxLogFileSize(
+ JNIEnv* env, jobject, jlong jhandle, jlong max_log_file_size) {
+ auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(max_log_file_size);
if (s.ok()) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_log_file_size =
max_log_file_size;
* Method: logFileTimeToRoll
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_logFileTimeToRoll(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_logFileTimeToRoll(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->log_file_time_to_roll;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setLogFileTimeToRoll(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong log_file_time_to_roll) {
- rocksdb::Status s =
- rocksdb::check_if_jlong_fits_size_t(log_file_time_to_roll);
+ JNIEnv* env, jobject, jlong jhandle, jlong log_file_time_to_roll) {
+ auto s =
+ rocksdb::JniUtil::check_if_jlong_fits_size_t(log_file_time_to_roll);
if (s.ok()) {
reinterpret_cast<rocksdb::Options*>(jhandle)->log_file_time_to_roll =
log_file_time_to_roll;
* Method: keepLogFileNum
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_keepLogFileNum(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_keepLogFileNum(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->keep_log_file_num;
}
* Method: setKeepLogFileNum
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setKeepLogFileNum(JNIEnv* env, jobject /*jobj*/,
- jlong jhandle,
- jlong keep_log_file_num) {
- rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(keep_log_file_num);
+void Java_org_rocksdb_Options_setKeepLogFileNum(
+ JNIEnv* env, jobject, jlong jhandle, jlong keep_log_file_num) {
+ auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(keep_log_file_num);
if (s.ok()) {
reinterpret_cast<rocksdb::Options*>(jhandle)->keep_log_file_num =
keep_log_file_num;
* Method: recycleLogFileNum
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_recycleLogFileNum(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_recycleLogFileNum(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->recycle_log_file_num;
}
* Method: setRecycleLogFileNum
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setRecycleLogFileNum(JNIEnv* env,
- jobject /*jobj*/,
- jlong jhandle,
- jlong recycle_log_file_num) {
- rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(recycle_log_file_num);
+void Java_org_rocksdb_Options_setRecycleLogFileNum(
+ JNIEnv* env, jobject, jlong jhandle, jlong recycle_log_file_num) {
+ auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(recycle_log_file_num);
if (s.ok()) {
reinterpret_cast<rocksdb::Options*>(jhandle)->recycle_log_file_num =
recycle_log_file_num;
* Method: maxManifestFileSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_maxManifestFileSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_maxManifestFileSize(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->max_manifest_file_size;
}
* Method: memTableFactoryName
 * Signature: (J)Ljava/lang/String;
*/
-jstring Java_org_rocksdb_Options_memTableFactoryName(JNIEnv* env,
- jobject /*jobj*/,
- jlong jhandle) {
+jstring Java_org_rocksdb_Options_memTableFactoryName(
+ JNIEnv* env, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
rocksdb::MemTableRepFactory* tf = opt->memtable_factory.get();
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setMaxManifestFileSize(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong max_manifest_file_size) {
+ JNIEnv*, jobject, jlong jhandle, jlong max_manifest_file_size) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_manifest_file_size =
static_cast<int64_t>(max_manifest_file_size);
}
* Method: setMemTableFactory
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setMemTableFactory(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jlong jfactory_handle) {
+void Java_org_rocksdb_Options_setMemTableFactory(
+ JNIEnv*, jobject, jlong jhandle, jlong jfactory_handle) {
reinterpret_cast<rocksdb::Options*>(jhandle)->memtable_factory.reset(
reinterpret_cast<rocksdb::MemTableRepFactory*>(jfactory_handle));
}
* Method: setRateLimiter
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setRateLimiter(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle,
- jlong jrate_limiter_handle) {
+void Java_org_rocksdb_Options_setRateLimiter(
+ JNIEnv*, jobject, jlong jhandle, jlong jrate_limiter_handle) {
std::shared_ptr<rocksdb::RateLimiter>* pRateLimiter =
reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter>*>(
jrate_limiter_handle);
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setSstFileManager(
- JNIEnv* /*env*/, jobject /*job*/, jlong jhandle,
- jlong jsst_file_manager_handle) {
+ JNIEnv*, jobject, jlong jhandle, jlong jsst_file_manager_handle) {
auto* sptr_sst_file_manager =
reinterpret_cast<std::shared_ptr<rocksdb::SstFileManager>*>(
jsst_file_manager_handle);
* Method: setLogger
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setLogger(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle, jlong jlogger_handle) {
+void Java_org_rocksdb_Options_setLogger(
+ JNIEnv*, jobject, jlong jhandle, jlong jlogger_handle) {
std::shared_ptr<rocksdb::LoggerJniCallback>* pLogger =
reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback>*>(
jlogger_handle);
* Method: setInfoLogLevel
* Signature: (JB)V
*/
-void Java_org_rocksdb_Options_setInfoLogLevel(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle, jbyte jlog_level) {
+void Java_org_rocksdb_Options_setInfoLogLevel(
+ JNIEnv*, jobject, jlong jhandle, jbyte jlog_level) {
reinterpret_cast<rocksdb::Options*>(jhandle)->info_log_level =
static_cast<rocksdb::InfoLogLevel>(jlog_level);
}
* Method: infoLogLevel
* Signature: (J)B
*/
-jbyte Java_org_rocksdb_Options_infoLogLevel(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jbyte Java_org_rocksdb_Options_infoLogLevel(
+ JNIEnv*, jobject, jlong jhandle) {
return static_cast<jbyte>(
reinterpret_cast<rocksdb::Options*>(jhandle)->info_log_level);
}
* Method: tableCacheNumshardbits
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_tableCacheNumshardbits(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_tableCacheNumshardbits(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->table_cache_numshardbits;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setTableCacheNumshardbits(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint table_cache_numshardbits) {
+ JNIEnv*, jobject, jlong jhandle, jint table_cache_numshardbits) {
reinterpret_cast<rocksdb::Options*>(jhandle)->table_cache_numshardbits =
static_cast<int>(table_cache_numshardbits);
}
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_useFixedLengthPrefixExtractor(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint jprefix_length) {
+ JNIEnv*, jobject, jlong jhandle, jint jprefix_length) {
reinterpret_cast<rocksdb::Options*>(jhandle)->prefix_extractor.reset(
rocksdb::NewFixedPrefixTransform(static_cast<int>(jprefix_length)));
}
* Method: useCappedPrefixExtractor
* Signature: (JI)V
*/
-void Java_org_rocksdb_Options_useCappedPrefixExtractor(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jint jprefix_length) {
+void Java_org_rocksdb_Options_useCappedPrefixExtractor(
+ JNIEnv*, jobject, jlong jhandle, jint jprefix_length) {
reinterpret_cast<rocksdb::Options*>(jhandle)->prefix_extractor.reset(
rocksdb::NewCappedPrefixTransform(static_cast<int>(jprefix_length)));
}
* Method: walTtlSeconds
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_walTtlSeconds(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_walTtlSeconds(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->WAL_ttl_seconds;
}
* Method: setWalTtlSeconds
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setWalTtlSeconds(JNIEnv* /*env*/,
- jobject /*jobj*/, jlong jhandle,
- jlong WAL_ttl_seconds) {
+void Java_org_rocksdb_Options_setWalTtlSeconds(
+ JNIEnv*, jobject, jlong jhandle, jlong WAL_ttl_seconds) {
reinterpret_cast<rocksdb::Options*>(jhandle)->WAL_ttl_seconds =
static_cast<int64_t>(WAL_ttl_seconds);
}
 * Method: walSizeLimitMB
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_walSizeLimitMB(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_walSizeLimitMB(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->WAL_size_limit_MB;
}
* Method: setWalSizeLimitMB
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setWalSizeLimitMB(JNIEnv* /*env*/,
- jobject /*jobj*/, jlong jhandle,
- jlong WAL_size_limit_MB) {
+void Java_org_rocksdb_Options_setWalSizeLimitMB(
+ JNIEnv*, jobject, jlong jhandle, jlong WAL_size_limit_MB) {
reinterpret_cast<rocksdb::Options*>(jhandle)->WAL_size_limit_MB =
static_cast<int64_t>(WAL_size_limit_MB);
}
* Method: manifestPreallocationSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_manifestPreallocationSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_manifestPreallocationSize(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->manifest_preallocation_size;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setManifestPreallocationSize(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong preallocation_size) {
- rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(preallocation_size);
+ JNIEnv* env, jobject, jlong jhandle, jlong preallocation_size) {
+ auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(preallocation_size);
if (s.ok()) {
reinterpret_cast<rocksdb::Options*>(jhandle)->manifest_preallocation_size =
preallocation_size;
* Method: setTableFactory
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setTableFactory(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle,
- jlong jfactory_handle) {
- reinterpret_cast<rocksdb::Options*>(jhandle)->table_factory.reset(
- reinterpret_cast<rocksdb::TableFactory*>(jfactory_handle));
+void Java_org_rocksdb_Options_setTableFactory(
+ JNIEnv*, jobject, jlong jhandle, jlong jtable_factory_handle) {
+ auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
+ auto* table_factory =
+ reinterpret_cast<rocksdb::TableFactory*>(jtable_factory_handle);
+ options->table_factory.reset(table_factory);
}
/*
* Method: allowMmapReads
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_allowMmapReads(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_allowMmapReads(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->allow_mmap_reads;
}
* Method: setAllowMmapReads
* Signature: (JZ)V
*/
-void Java_org_rocksdb_Options_setAllowMmapReads(JNIEnv* /*env*/,
- jobject /*jobj*/, jlong jhandle,
- jboolean allow_mmap_reads) {
+void Java_org_rocksdb_Options_setAllowMmapReads(
+ JNIEnv*, jobject, jlong jhandle, jboolean allow_mmap_reads) {
reinterpret_cast<rocksdb::Options*>(jhandle)->allow_mmap_reads =
static_cast<bool>(allow_mmap_reads);
}
* Method: allowMmapWrites
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_allowMmapWrites(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_allowMmapWrites(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->allow_mmap_writes;
}
* Method: setAllowMmapWrites
* Signature: (JZ)V
*/
-void Java_org_rocksdb_Options_setAllowMmapWrites(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean allow_mmap_writes) {
+void Java_org_rocksdb_Options_setAllowMmapWrites(
+ JNIEnv*, jobject, jlong jhandle, jboolean allow_mmap_writes) {
reinterpret_cast<rocksdb::Options*>(jhandle)->allow_mmap_writes =
static_cast<bool>(allow_mmap_writes);
}
* Method: useDirectReads
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_useDirectReads(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_useDirectReads(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->use_direct_reads;
}
* Method: setUseDirectReads
* Signature: (JZ)V
*/
-void Java_org_rocksdb_Options_setUseDirectReads(JNIEnv* /*env*/,
- jobject /*jobj*/, jlong jhandle,
- jboolean use_direct_reads) {
+void Java_org_rocksdb_Options_setUseDirectReads(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_direct_reads) {
reinterpret_cast<rocksdb::Options*>(jhandle)->use_direct_reads =
static_cast<bool>(use_direct_reads);
}
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_useDirectIoForFlushAndCompaction(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->use_direct_io_for_flush_and_compaction;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setUseDirectIoForFlushAndCompaction(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jboolean use_direct_io_for_flush_and_compaction) {
reinterpret_cast<rocksdb::Options*>(jhandle)
->use_direct_io_for_flush_and_compaction =
* Method: setAllowFAllocate
* Signature: (JZ)V
*/
-void Java_org_rocksdb_Options_setAllowFAllocate(JNIEnv* /*env*/,
- jobject /*jobj*/, jlong jhandle,
- jboolean jallow_fallocate) {
+void Java_org_rocksdb_Options_setAllowFAllocate(
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_fallocate) {
reinterpret_cast<rocksdb::Options*>(jhandle)->allow_fallocate =
static_cast<bool>(jallow_fallocate);
}
* Method: allowFAllocate
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_allowFAllocate(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_allowFAllocate(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<jboolean>(opt->allow_fallocate);
}
* Method: isFdCloseOnExec
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_isFdCloseOnExec(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_isFdCloseOnExec(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->is_fd_close_on_exec;
}
* Method: setIsFdCloseOnExec
* Signature: (JZ)V
*/
-void Java_org_rocksdb_Options_setIsFdCloseOnExec(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean is_fd_close_on_exec) {
+void Java_org_rocksdb_Options_setIsFdCloseOnExec(
+ JNIEnv*, jobject, jlong jhandle, jboolean is_fd_close_on_exec) {
reinterpret_cast<rocksdb::Options*>(jhandle)->is_fd_close_on_exec =
static_cast<bool>(is_fd_close_on_exec);
}
* Method: statsDumpPeriodSec
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_statsDumpPeriodSec(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_statsDumpPeriodSec(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->stats_dump_period_sec;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setStatsDumpPeriodSec(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jint stats_dump_period_sec) {
reinterpret_cast<rocksdb::Options*>(jhandle)->stats_dump_period_sec =
static_cast<int>(stats_dump_period_sec);
* Method: adviseRandomOnOpen
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_adviseRandomOnOpen(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_adviseRandomOnOpen(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->advise_random_on_open;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setAdviseRandomOnOpen(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jboolean advise_random_on_open) {
reinterpret_cast<rocksdb::Options*>(jhandle)->advise_random_on_open =
static_cast<bool>(advise_random_on_open);
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setDbWriteBufferSize(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jlong jdb_write_buffer_size) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
opt->db_write_buffer_size = static_cast<size_t>(jdb_write_buffer_size);
* Method: dbWriteBufferSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_dbWriteBufferSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_dbWriteBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<jlong>(opt->db_write_buffer_size);
}
* Signature: (JB)V
*/
void Java_org_rocksdb_Options_setAccessHintOnCompactionStart(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jbyte jaccess_hint_value) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
opt->access_hint_on_compaction_start =
* Method: accessHintOnCompactionStart
* Signature: (J)B
*/
-jbyte Java_org_rocksdb_Options_accessHintOnCompactionStart(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jbyte Java_org_rocksdb_Options_accessHintOnCompactionStart(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return rocksdb::AccessHintJni::toJavaAccessHint(
opt->access_hint_on_compaction_start);
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setNewTableReaderForCompactionInputs(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jboolean jnew_table_reader_for_compaction_inputs) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
opt->new_table_reader_for_compaction_inputs =
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_newTableReaderForCompactionInputs(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<bool>(opt->new_table_reader_for_compaction_inputs);
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setCompactionReadaheadSize(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jlong jcompaction_readahead_size) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
opt->compaction_readahead_size =
* Method: compactionReadaheadSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_compactionReadaheadSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_compactionReadaheadSize(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<jlong>(opt->compaction_readahead_size);
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setRandomAccessMaxBufferSize(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jrandom_access_max_buffer_size) {
+ JNIEnv*, jobject, jlong jhandle, jlong jrandom_access_max_buffer_size) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
opt->random_access_max_buffer_size =
static_cast<size_t>(jrandom_access_max_buffer_size);
* Method: randomAccessMaxBufferSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_randomAccessMaxBufferSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_randomAccessMaxBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<jlong>(opt->random_access_max_buffer_size);
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setWritableFileMaxBufferSize(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jlong jwritable_file_max_buffer_size) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
opt->writable_file_max_buffer_size =
* Method: writableFileMaxBufferSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_writableFileMaxBufferSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_writableFileMaxBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<jlong>(opt->writable_file_max_buffer_size);
}
* Method: useAdaptiveMutex
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_useAdaptiveMutex(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_useAdaptiveMutex(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->use_adaptive_mutex;
}
* Method: setUseAdaptiveMutex
* Signature: (JZ)V
*/
-void Java_org_rocksdb_Options_setUseAdaptiveMutex(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean use_adaptive_mutex) {
+void Java_org_rocksdb_Options_setUseAdaptiveMutex(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_adaptive_mutex) {
reinterpret_cast<rocksdb::Options*>(jhandle)->use_adaptive_mutex =
static_cast<bool>(use_adaptive_mutex);
}
* Method: bytesPerSync
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_bytesPerSync(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_bytesPerSync(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->bytes_per_sync;
}
* Method: setBytesPerSync
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setBytesPerSync(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle,
- jlong bytes_per_sync) {
+void Java_org_rocksdb_Options_setBytesPerSync(
+ JNIEnv*, jobject, jlong jhandle, jlong bytes_per_sync) {
reinterpret_cast<rocksdb::Options*>(jhandle)->bytes_per_sync =
static_cast<int64_t>(bytes_per_sync);
}
* Method: setWalBytesPerSync
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setWalBytesPerSync(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jlong jwal_bytes_per_sync) {
+void Java_org_rocksdb_Options_setWalBytesPerSync(
+ JNIEnv*, jobject, jlong jhandle, jlong jwal_bytes_per_sync) {
reinterpret_cast<rocksdb::Options*>(jhandle)->wal_bytes_per_sync =
static_cast<int64_t>(jwal_bytes_per_sync);
}
* Method: walBytesPerSync
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_walBytesPerSync(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_walBytesPerSync(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<jlong>(opt->wal_bytes_per_sync);
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setEnableThreadTracking(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jenable_thread_tracking) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jenable_thread_tracking) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
opt->enable_thread_tracking = static_cast<bool>(jenable_thread_tracking);
}
* Method: enableThreadTracking
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_enableThreadTracking(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_enableThreadTracking(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<jboolean>(opt->enable_thread_tracking);
}
* Method: setDelayedWriteRate
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setDelayedWriteRate(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jlong jdelayed_write_rate) {
+void Java_org_rocksdb_Options_setDelayedWriteRate(
+ JNIEnv*, jobject, jlong jhandle, jlong jdelayed_write_rate) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
opt->delayed_write_rate = static_cast<uint64_t>(jdelayed_write_rate);
}
* Method: delayedWriteRate
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_delayedWriteRate(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_delayedWriteRate(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<jlong>(opt->delayed_write_rate);
}
+/*
+ * Class: org_rocksdb_Options
+ * Method: setEnablePipelinedWrite
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setEnablePipelinedWrite(
+ JNIEnv*, jobject, jlong jhandle, jboolean jenable_pipelined_write) {
+ auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+ opt->enable_pipelined_write = jenable_pipelined_write == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: enablePipelinedWrite
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_enablePipelinedWrite(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+ return static_cast<jboolean>(opt->enable_pipelined_write);
+}
+
/*
* Class: org_rocksdb_Options
* Method: setAllowConcurrentMemtableWrite
* Signature: (JZ)V
*/
-void Java_org_rocksdb_Options_setAllowConcurrentMemtableWrite(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean allow) {
+void Java_org_rocksdb_Options_setAllowConcurrentMemtableWrite(
+ JNIEnv*, jobject, jlong jhandle, jboolean allow) {
reinterpret_cast<rocksdb::Options*>(jhandle)
->allow_concurrent_memtable_write = static_cast<bool>(allow);
}
* Method: allowConcurrentMemtableWrite
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_allowConcurrentMemtableWrite(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_allowConcurrentMemtableWrite(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->allow_concurrent_memtable_write;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setEnableWriteThreadAdaptiveYield(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jboolean yield) {
+ JNIEnv*, jobject, jlong jhandle, jboolean yield) {
reinterpret_cast<rocksdb::Options*>(jhandle)
->enable_write_thread_adaptive_yield = static_cast<bool>(yield);
}
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_enableWriteThreadAdaptiveYield(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->enable_write_thread_adaptive_yield;
}
* Method: setWriteThreadMaxYieldUsec
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setWriteThreadMaxYieldUsec(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jlong max) {
+void Java_org_rocksdb_Options_setWriteThreadMaxYieldUsec(
+ JNIEnv*, jobject, jlong jhandle, jlong max) {
reinterpret_cast<rocksdb::Options*>(jhandle)->write_thread_max_yield_usec =
static_cast<int64_t>(max);
}
* Method: writeThreadMaxYieldUsec
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_writeThreadMaxYieldUsec(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_writeThreadMaxYieldUsec(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->write_thread_max_yield_usec;
}
* Method: setWriteThreadSlowYieldUsec
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setWriteThreadSlowYieldUsec(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jlong slow) {
+void Java_org_rocksdb_Options_setWriteThreadSlowYieldUsec(
+ JNIEnv*, jobject, jlong jhandle, jlong slow) {
reinterpret_cast<rocksdb::Options*>(jhandle)->write_thread_slow_yield_usec =
static_cast<int64_t>(slow);
}
* Method: writeThreadSlowYieldUsec
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_writeThreadSlowYieldUsec(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_writeThreadSlowYieldUsec(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->write_thread_slow_yield_usec;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setSkipStatsUpdateOnDbOpen(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jboolean jskip_stats_update_on_db_open) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
opt->skip_stats_update_on_db_open =
* Method: skipStatsUpdateOnDbOpen
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_skipStatsUpdateOnDbOpen(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_skipStatsUpdateOnDbOpen(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<jboolean>(opt->skip_stats_update_on_db_open);
}
* Signature: (JB)V
*/
void Java_org_rocksdb_Options_setWalRecoveryMode(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jbyte jwal_recovery_mode_value) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
opt->wal_recovery_mode = rocksdb::WALRecoveryModeJni::toCppWALRecoveryMode(
* Method: walRecoveryMode
* Signature: (J)B
*/
-jbyte Java_org_rocksdb_Options_walRecoveryMode(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jbyte Java_org_rocksdb_Options_walRecoveryMode(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return rocksdb::WALRecoveryModeJni::toJavaWALRecoveryMode(
opt->wal_recovery_mode);
* Method: setAllow2pc
* Signature: (JZ)V
*/
-void Java_org_rocksdb_Options_setAllow2pc(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle, jboolean jallow_2pc) {
+void Java_org_rocksdb_Options_setAllow2pc(
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_2pc) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
opt->allow_2pc = static_cast<bool>(jallow_2pc);
}
* Method: allow2pc
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_allow2pc(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_allow2pc(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<jboolean>(opt->allow_2pc);
}
* Method: setRowCache
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setRowCache(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle,
- jlong jrow_cache_handle) {
+void Java_org_rocksdb_Options_setRowCache(
+ JNIEnv*, jobject, jlong jhandle, jlong jrow_cache_handle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
auto* row_cache =
reinterpret_cast<std::shared_ptr<rocksdb::Cache>*>(jrow_cache_handle);
opt->row_cache = *row_cache;
}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setWalFilter
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setWalFilter(
+ JNIEnv*, jobject, jlong jhandle, jlong jwal_filter_handle) {
+ auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+ auto* wal_filter =
+ reinterpret_cast<rocksdb::WalFilterJniCallback*>(jwal_filter_handle);
+ opt->wal_filter = wal_filter;
+}
+
/*
* Class: org_rocksdb_Options
* Method: setFailIfOptionsFileError
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setFailIfOptionsFileError(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jfail_if_options_file_error) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jfail_if_options_file_error) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
opt->fail_if_options_file_error =
static_cast<bool>(jfail_if_options_file_error);
* Method: failIfOptionsFileError
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_failIfOptionsFileError(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_failIfOptionsFileError(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<jboolean>(opt->fail_if_options_file_error);
}
* Method: setDumpMallocStats
* Signature: (JZ)V
*/
-void Java_org_rocksdb_Options_setDumpMallocStats(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean jdump_malloc_stats) {
+void Java_org_rocksdb_Options_setDumpMallocStats(
+ JNIEnv*, jobject, jlong jhandle, jboolean jdump_malloc_stats) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
opt->dump_malloc_stats = static_cast<bool>(jdump_malloc_stats);
}
* Method: dumpMallocStats
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_dumpMallocStats(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_dumpMallocStats(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<jboolean>(opt->dump_malloc_stats);
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setAvoidFlushDuringRecovery(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean javoid_flush_during_recovery) {
+ JNIEnv*, jobject, jlong jhandle, jboolean javoid_flush_during_recovery) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
opt->avoid_flush_during_recovery =
static_cast<bool>(javoid_flush_during_recovery);
* Method: avoidFlushDuringRecovery
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_avoidFlushDuringRecovery(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_avoidFlushDuringRecovery(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<jboolean>(opt->avoid_flush_during_recovery);
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setAvoidFlushDuringShutdown(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean javoid_flush_during_shutdown) {
+ JNIEnv*, jobject, jlong jhandle, jboolean javoid_flush_during_shutdown) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
opt->avoid_flush_during_shutdown =
static_cast<bool>(javoid_flush_during_shutdown);
* Method: avoidFlushDuringShutdown
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_avoidFlushDuringShutdown(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_avoidFlushDuringShutdown(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<jboolean>(opt->avoid_flush_during_shutdown);
}
+/*
+ * Class: org_rocksdb_Options
+ * Method: setAllowIngestBehind
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAllowIngestBehind(
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_ingest_behind) {
+ auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+ opt->allow_ingest_behind = jallow_ingest_behind == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: allowIngestBehind
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_allowIngestBehind(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+ return static_cast<jboolean>(opt->allow_ingest_behind);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setPreserveDeletes
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setPreserveDeletes(
+ JNIEnv*, jobject, jlong jhandle, jboolean jpreserve_deletes) {
+ auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+ opt->preserve_deletes = jpreserve_deletes == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: preserveDeletes
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_preserveDeletes(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+ return static_cast<jboolean>(opt->preserve_deletes);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setTwoWriteQueues
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setTwoWriteQueues(
+ JNIEnv*, jobject, jlong jhandle, jboolean jtwo_write_queues) {
+ auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+ opt->two_write_queues = jtwo_write_queues == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: twoWriteQueues
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_twoWriteQueues(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+ return static_cast<jboolean>(opt->two_write_queues);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setManualWalFlush
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setManualWalFlush(
+ JNIEnv*, jobject, jlong jhandle, jboolean jmanual_wal_flush) {
+ auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+ opt->manual_wal_flush = jmanual_wal_flush == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: manualWalFlush
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_manualWalFlush(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+ return static_cast<jboolean>(opt->manual_wal_flush);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setAtomicFlush
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAtomicFlush(
+ JNIEnv*, jobject, jlong jhandle, jboolean jatomic_flush) {
+ auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+ opt->atomic_flush = jatomic_flush == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: atomicFlush
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_atomicFlush(
+    JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+ return static_cast<jboolean>(opt->atomic_flush);
+}
+
/*
* Method: tableFactoryName
* Signature: (J)Ljava/lang/String
*/
-jstring Java_org_rocksdb_Options_tableFactoryName(JNIEnv* env, jobject /*jobj*/,
- jlong jhandle) {
+jstring Java_org_rocksdb_Options_tableFactoryName(
+ JNIEnv* env, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::Options*>(jhandle);
rocksdb::TableFactory* tf = opt->table_factory.get();
* Method: minWriteBufferNumberToMerge
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_minWriteBufferNumberToMerge(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_minWriteBufferNumberToMerge(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->min_write_buffer_number_to_merge;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setMinWriteBufferNumberToMerge(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint jmin_write_buffer_number_to_merge) {
+ JNIEnv*, jobject, jlong jhandle, jint jmin_write_buffer_number_to_merge) {
reinterpret_cast<rocksdb::Options*>(jhandle)
->min_write_buffer_number_to_merge =
static_cast<int>(jmin_write_buffer_number_to_merge);
* Method: maxWriteBufferNumberToMaintain
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_maxWriteBufferNumberToMaintain(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_maxWriteBufferNumberToMaintain(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->max_write_buffer_number_to_maintain;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setMaxWriteBufferNumberToMaintain(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jint jmax_write_buffer_number_to_maintain) {
reinterpret_cast<rocksdb::Options*>(jhandle)
->max_write_buffer_number_to_maintain =
* Signature: (JB)V
*/
void Java_org_rocksdb_Options_setCompressionType(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jbyte jcompression_type_value) {
+ JNIEnv*, jobject, jlong jhandle, jbyte jcompression_type_value) {
auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
opts->compression = rocksdb::CompressionTypeJni::toCppCompressionType(
jcompression_type_value);
* Method: compressionType
* Signature: (J)B
*/
-jbyte Java_org_rocksdb_Options_compressionType(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jbyte Java_org_rocksdb_Options_compressionType(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
return rocksdb::CompressionTypeJni::toJavaCompressionType(opts->compression);
}
* @return A std::unique_ptr to the vector, or std::unique_ptr(nullptr) if a JNI
* exception occurs
*/
-std::unique_ptr<std::vector<rocksdb::CompressionType>>
-rocksdb_compression_vector_helper(JNIEnv* env, jbyteArray jcompression_levels) {
+std::unique_ptr<std::vector<rocksdb::CompressionType>> rocksdb_compression_vector_helper(
+    JNIEnv* env, jbyteArray jcompression_levels) {
jsize len = env->GetArrayLength(jcompression_levels);
jbyte* jcompression_level =
env->GetByteArrayElements(jcompression_levels, nullptr);
* Signature: (J[B)V
*/
void Java_org_rocksdb_Options_setCompressionPerLevel(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle,
- jbyteArray jcompressionLevels) {
+ JNIEnv* env, jobject, jlong jhandle, jbyteArray jcompressionLevels) {
auto uptr_compression_levels =
rocksdb_compression_vector_helper(env, jcompressionLevels);
if (!uptr_compression_levels) {
* Method: compressionPerLevel
* Signature: (J)[B
*/
-jbyteArray Java_org_rocksdb_Options_compressionPerLevel(JNIEnv* env,
- jobject /*jobj*/,
- jlong jhandle) {
+jbyteArray Java_org_rocksdb_Options_compressionPerLevel(
+ JNIEnv* env, jobject, jlong jhandle) {
auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
return rocksdb_compression_list_helper(env, options->compression_per_level);
}
* Signature: (JB)V
*/
void Java_org_rocksdb_Options_setBottommostCompressionType(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jbyte jcompression_type_value) {
+ JNIEnv*, jobject, jlong jhandle, jbyte jcompression_type_value) {
auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
options->bottommost_compression =
rocksdb::CompressionTypeJni::toCppCompressionType(
* Method: bottommostCompressionType
* Signature: (J)B
*/
-jbyte Java_org_rocksdb_Options_bottommostCompressionType(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jbyte Java_org_rocksdb_Options_bottommostCompressionType(
+ JNIEnv*, jobject, jlong jhandle) {
auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
return rocksdb::CompressionTypeJni::toJavaCompressionType(
options->bottommost_compression);
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setBottommostCompressionOptions(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jlong jbottommost_compression_options_handle) {
auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
auto* bottommost_compression_options =
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setCompressionOptions(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jcompression_options_handle) {
+ JNIEnv*, jobject, jlong jhandle, jlong jcompression_options_handle) {
auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
auto* compression_options = reinterpret_cast<rocksdb::CompressionOptions*>(
jcompression_options_handle);
* Method: setCompactionStyle
* Signature: (JB)V
*/
-void Java_org_rocksdb_Options_setCompactionStyle(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jbyte compaction_style) {
- reinterpret_cast<rocksdb::Options*>(jhandle)->compaction_style =
- static_cast<rocksdb::CompactionStyle>(compaction_style);
+void Java_org_rocksdb_Options_setCompactionStyle(
+ JNIEnv*, jobject, jlong jhandle, jbyte jcompaction_style) {
+ auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
+ options->compaction_style =
+ rocksdb::CompactionStyleJni::toCppCompactionStyle(
+ jcompaction_style);
}
/*
* Method: compactionStyle
* Signature: (J)B
*/
-jbyte Java_org_rocksdb_Options_compactionStyle(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
- return reinterpret_cast<rocksdb::Options*>(jhandle)->compaction_style;
+jbyte Java_org_rocksdb_Options_compactionStyle(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
+ return rocksdb::CompactionStyleJni::toJavaCompactionStyle(
+ options->compaction_style);
}
/*
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setMaxTableFilesSizeFIFO(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jmax_table_files_size) {
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_table_files_size) {
reinterpret_cast<rocksdb::Options*>(jhandle)
->compaction_options_fifo.max_table_files_size =
static_cast<uint64_t>(jmax_table_files_size);
* Method: maxTableFilesSizeFIFO
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_maxTableFilesSizeFIFO(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_maxTableFilesSizeFIFO(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->compaction_options_fifo.max_table_files_size;
}
* Method: numLevels
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_numLevels(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_numLevels(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->num_levels;
}
* Method: setNumLevels
* Signature: (JI)V
*/
-void Java_org_rocksdb_Options_setNumLevels(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle, jint jnum_levels) {
+void Java_org_rocksdb_Options_setNumLevels(
+ JNIEnv*, jobject, jlong jhandle, jint jnum_levels) {
reinterpret_cast<rocksdb::Options*>(jhandle)->num_levels =
static_cast<int>(jnum_levels);
}
* Signature: (J)I
*/
jint Java_org_rocksdb_Options_levelZeroFileNumCompactionTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->level0_file_num_compaction_trigger;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setLevelZeroFileNumCompactionTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jint jlevel0_file_num_compaction_trigger) {
reinterpret_cast<rocksdb::Options*>(jhandle)
->level0_file_num_compaction_trigger =
* Method: levelZeroSlowdownWritesTrigger
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_levelZeroSlowdownWritesTrigger(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_levelZeroSlowdownWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->level0_slowdown_writes_trigger;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setLevelZeroSlowdownWritesTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint jlevel0_slowdown_writes_trigger) {
+ JNIEnv*, jobject, jlong jhandle, jint jlevel0_slowdown_writes_trigger) {
reinterpret_cast<rocksdb::Options*>(jhandle)->level0_slowdown_writes_trigger =
static_cast<int>(jlevel0_slowdown_writes_trigger);
}
* Method: levelZeroStopWritesTrigger
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_levelZeroStopWritesTrigger(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_levelZeroStopWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->level0_stop_writes_trigger;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setLevelZeroStopWritesTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint jlevel0_stop_writes_trigger) {
+ JNIEnv*, jobject, jlong jhandle, jint jlevel0_stop_writes_trigger) {
reinterpret_cast<rocksdb::Options*>(jhandle)->level0_stop_writes_trigger =
static_cast<int>(jlevel0_stop_writes_trigger);
}
* Method: targetFileSizeBase
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_targetFileSizeBase(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_targetFileSizeBase(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->target_file_size_base;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setTargetFileSizeBase(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jtarget_file_size_base) {
+ JNIEnv*, jobject, jlong jhandle, jlong jtarget_file_size_base) {
reinterpret_cast<rocksdb::Options*>(jhandle)->target_file_size_base =
static_cast<uint64_t>(jtarget_file_size_base);
}
* Method: targetFileSizeMultiplier
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_targetFileSizeMultiplier(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_targetFileSizeMultiplier(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->target_file_size_multiplier;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setTargetFileSizeMultiplier(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint jtarget_file_size_multiplier) {
+ JNIEnv*, jobject, jlong jhandle, jint jtarget_file_size_multiplier) {
reinterpret_cast<rocksdb::Options*>(jhandle)->target_file_size_multiplier =
static_cast<int>(jtarget_file_size_multiplier);
}
* Method: maxBytesForLevelBase
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_maxBytesForLevelBase(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_maxBytesForLevelBase(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->max_bytes_for_level_base;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setMaxBytesForLevelBase(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jmax_bytes_for_level_base) {
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_bytes_for_level_base) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_bytes_for_level_base =
static_cast<int64_t>(jmax_bytes_for_level_base);
}
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_levelCompactionDynamicLevelBytes(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->level_compaction_dynamic_level_bytes;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setLevelCompactionDynamicLevelBytes(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jenable_dynamic_level_bytes) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jenable_dynamic_level_bytes) {
reinterpret_cast<rocksdb::Options*>(jhandle)
->level_compaction_dynamic_level_bytes = (jenable_dynamic_level_bytes);
}
* Method: maxBytesForLevelMultiplier
* Signature: (J)D
*/
-jdouble Java_org_rocksdb_Options_maxBytesForLevelMultiplier(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jdouble Java_org_rocksdb_Options_maxBytesForLevelMultiplier(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->max_bytes_for_level_multiplier;
}
* Signature: (JD)V
*/
void Java_org_rocksdb_Options_setMaxBytesForLevelMultiplier(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jdouble jmax_bytes_for_level_multiplier) {
+ JNIEnv*, jobject, jlong jhandle, jdouble jmax_bytes_for_level_multiplier) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_bytes_for_level_multiplier =
static_cast<double>(jmax_bytes_for_level_multiplier);
}
* Method: maxCompactionBytes
* Signature: (J)I
*/
-jlong Java_org_rocksdb_Options_maxCompactionBytes(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_maxCompactionBytes(
+ JNIEnv*, jobject, jlong jhandle) {
return static_cast<jlong>(
reinterpret_cast<rocksdb::Options*>(jhandle)->max_compaction_bytes);
}
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setMaxCompactionBytes(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jmax_compaction_bytes) {
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_compaction_bytes) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_compaction_bytes =
static_cast<uint64_t>(jmax_compaction_bytes);
}
* Method: arenaBlockSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_arenaBlockSize(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_arenaBlockSize(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->arena_block_size;
}
* Method: setArenaBlockSize
* Signature: (JJ)V
*/
-void Java_org_rocksdb_Options_setArenaBlockSize(JNIEnv* env,
- jobject /*jobj*/, jlong jhandle,
- jlong jarena_block_size) {
- rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jarena_block_size);
+void Java_org_rocksdb_Options_setArenaBlockSize(
+ JNIEnv* env, jobject, jlong jhandle, jlong jarena_block_size) {
+ auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(jarena_block_size);
if (s.ok()) {
reinterpret_cast<rocksdb::Options*>(jhandle)->arena_block_size =
jarena_block_size;
* Method: disableAutoCompactions
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_disableAutoCompactions(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_disableAutoCompactions(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->disable_auto_compactions;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setDisableAutoCompactions(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jdisable_auto_compactions) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jdisable_auto_compactions) {
reinterpret_cast<rocksdb::Options*>(jhandle)->disable_auto_compactions =
static_cast<bool>(jdisable_auto_compactions);
}
* Method: maxSequentialSkipInIterations
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_maxSequentialSkipInIterations(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_maxSequentialSkipInIterations(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->max_sequential_skip_in_iterations;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setMaxSequentialSkipInIterations(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jlong jmax_sequential_skip_in_iterations) {
reinterpret_cast<rocksdb::Options*>(jhandle)
->max_sequential_skip_in_iterations =
* Method: inplaceUpdateSupport
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_inplaceUpdateSupport(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_inplaceUpdateSupport(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->inplace_update_support;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setInplaceUpdateSupport(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jinplace_update_support) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jinplace_update_support) {
reinterpret_cast<rocksdb::Options*>(jhandle)->inplace_update_support =
static_cast<bool>(jinplace_update_support);
}
* Method: inplaceUpdateNumLocks
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_inplaceUpdateNumLocks(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_inplaceUpdateNumLocks(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->inplace_update_num_locks;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setInplaceUpdateNumLocks(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle,
- jlong jinplace_update_num_locks) {
- rocksdb::Status s =
- rocksdb::check_if_jlong_fits_size_t(jinplace_update_num_locks);
+ JNIEnv* env, jobject, jlong jhandle, jlong jinplace_update_num_locks) {
+ auto s =
+ rocksdb::JniUtil::check_if_jlong_fits_size_t(jinplace_update_num_locks);
if (s.ok()) {
reinterpret_cast<rocksdb::Options*>(jhandle)->inplace_update_num_locks =
jinplace_update_num_locks;
* Method: memtablePrefixBloomSizeRatio
* Signature: (J)I
*/
-jdouble Java_org_rocksdb_Options_memtablePrefixBloomSizeRatio(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jdouble Java_org_rocksdb_Options_memtablePrefixBloomSizeRatio(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->memtable_prefix_bloom_size_ratio;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setMemtablePrefixBloomSizeRatio(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jdouble jmemtable_prefix_bloom_size_ratio) {
reinterpret_cast<rocksdb::Options*>(jhandle)
->memtable_prefix_bloom_size_ratio =
* Method: bloomLocality
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_bloomLocality(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_bloomLocality(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->bloom_locality;
}
* Method: setBloomLocality
* Signature: (JI)V
*/
-void Java_org_rocksdb_Options_setBloomLocality(JNIEnv* /*env*/,
- jobject /*jobj*/, jlong jhandle,
- jint jbloom_locality) {
+void Java_org_rocksdb_Options_setBloomLocality(
+ JNIEnv*, jobject, jlong jhandle, jint jbloom_locality) {
reinterpret_cast<rocksdb::Options*>(jhandle)->bloom_locality =
static_cast<int32_t>(jbloom_locality);
}
* Method: maxSuccessiveMerges
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_maxSuccessiveMerges(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_maxSuccessiveMerges(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->max_successive_merges;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setMaxSuccessiveMerges(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle,
- jlong jmax_successive_merges) {
- rocksdb::Status s =
- rocksdb::check_if_jlong_fits_size_t(jmax_successive_merges);
+ JNIEnv* env, jobject, jlong jhandle, jlong jmax_successive_merges) {
+ auto s =
+ rocksdb::JniUtil::check_if_jlong_fits_size_t(jmax_successive_merges);
if (s.ok()) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_successive_merges =
jmax_successive_merges;
* Method: optimizeFiltersForHits
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_optimizeFiltersForHits(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_optimizeFiltersForHits(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->optimize_filters_for_hits;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setOptimizeFiltersForHits(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean joptimize_filters_for_hits) {
+ JNIEnv*, jobject, jlong jhandle, jboolean joptimize_filters_for_hits) {
reinterpret_cast<rocksdb::Options*>(jhandle)->optimize_filters_for_hits =
static_cast<bool>(joptimize_filters_for_hits);
}
* Method: optimizeForSmallDb
* Signature: (J)V
*/
-void Java_org_rocksdb_Options_optimizeForSmallDb(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+void Java_org_rocksdb_Options_optimizeForSmallDb(
+ JNIEnv*, jobject, jlong jhandle) {
reinterpret_cast<rocksdb::Options*>(jhandle)->OptimizeForSmallDb();
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_optimizeForPointLookup(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong block_cache_size_mb) {
+ JNIEnv*, jobject, jlong jhandle, jlong block_cache_size_mb) {
reinterpret_cast<rocksdb::Options*>(jhandle)->OptimizeForPointLookup(
block_cache_size_mb);
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_optimizeLevelStyleCompaction(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong memtable_memory_budget) {
+ JNIEnv*, jobject, jlong jhandle, jlong memtable_memory_budget) {
reinterpret_cast<rocksdb::Options*>(jhandle)->OptimizeLevelStyleCompaction(
memtable_memory_budget);
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_optimizeUniversalStyleCompaction(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong memtable_memory_budget) {
+ JNIEnv*, jobject, jlong jhandle, jlong memtable_memory_budget) {
reinterpret_cast<rocksdb::Options*>(jhandle)
->OptimizeUniversalStyleCompaction(memtable_memory_budget);
}
* Method: prepareForBulkLoad
* Signature: (J)V
*/
-void Java_org_rocksdb_Options_prepareForBulkLoad(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+void Java_org_rocksdb_Options_prepareForBulkLoad(
+ JNIEnv*, jobject, jlong jhandle) {
reinterpret_cast<rocksdb::Options*>(jhandle)->PrepareForBulkLoad();
}
* Method: memtableHugePageSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_memtableHugePageSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_memtableHugePageSize(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->memtable_huge_page_size;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setMemtableHugePageSize(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle,
- jlong jmemtable_huge_page_size) {
- rocksdb::Status s =
- rocksdb::check_if_jlong_fits_size_t(jmemtable_huge_page_size);
+ JNIEnv* env, jobject, jlong jhandle, jlong jmemtable_huge_page_size) {
+ auto s =
+ rocksdb::JniUtil::check_if_jlong_fits_size_t(jmemtable_huge_page_size);
if (s.ok()) {
reinterpret_cast<rocksdb::Options*>(jhandle)->memtable_huge_page_size =
jmemtable_huge_page_size;
* Method: softPendingCompactionBytesLimit
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_softPendingCompactionBytesLimit(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_softPendingCompactionBytesLimit(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->soft_pending_compaction_bytes_limit;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setSoftPendingCompactionBytesLimit(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jlong jsoft_pending_compaction_bytes_limit) {
reinterpret_cast<rocksdb::Options*>(jhandle)
->soft_pending_compaction_bytes_limit =
* Method: softHardCompactionBytesLimit
* Signature: (J)J
*/
-jlong Java_org_rocksdb_Options_hardPendingCompactionBytesLimit(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_Options_hardPendingCompactionBytesLimit(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->hard_pending_compaction_bytes_limit;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setHardPendingCompactionBytesLimit(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jlong jhard_pending_compaction_bytes_limit) {
reinterpret_cast<rocksdb::Options*>(jhandle)
->hard_pending_compaction_bytes_limit =
* Method: level0FileNumCompactionTrigger
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_level0FileNumCompactionTrigger(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_level0FileNumCompactionTrigger(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->level0_file_num_compaction_trigger;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setLevel0FileNumCompactionTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jint jlevel0_file_num_compaction_trigger) {
reinterpret_cast<rocksdb::Options*>(jhandle)
->level0_file_num_compaction_trigger =
* Method: level0SlowdownWritesTrigger
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_level0SlowdownWritesTrigger(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_level0SlowdownWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->level0_slowdown_writes_trigger;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setLevel0SlowdownWritesTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint jlevel0_slowdown_writes_trigger) {
+ JNIEnv*, jobject, jlong jhandle, jint jlevel0_slowdown_writes_trigger) {
reinterpret_cast<rocksdb::Options*>(jhandle)->level0_slowdown_writes_trigger =
static_cast<int32_t>(jlevel0_slowdown_writes_trigger);
}
* Method: level0StopWritesTrigger
* Signature: (J)I
*/
-jint Java_org_rocksdb_Options_level0StopWritesTrigger(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_Options_level0StopWritesTrigger(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)
->level0_stop_writes_trigger;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setLevel0StopWritesTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint jlevel0_stop_writes_trigger) {
+ JNIEnv*, jobject, jlong jhandle, jint jlevel0_stop_writes_trigger) {
reinterpret_cast<rocksdb::Options*>(jhandle)->level0_stop_writes_trigger =
static_cast<int32_t>(jlevel0_stop_writes_trigger);
}
* Signature: (J)[I
*/
jintArray Java_org_rocksdb_Options_maxBytesForLevelMultiplierAdditional(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv* env, jobject, jlong jhandle) {
auto mbflma = reinterpret_cast<rocksdb::Options*>(jhandle)
->max_bytes_for_level_multiplier_additional;
* Signature: (J[I)V
*/
void Java_org_rocksdb_Options_setMaxBytesForLevelMultiplierAdditional(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle,
+ JNIEnv* env, jobject, jlong jhandle,
jintArray jmax_bytes_for_level_multiplier_additional) {
jsize len = env->GetArrayLength(jmax_bytes_for_level_multiplier_additional);
jint* additionals = env->GetIntArrayElements(
* Method: paranoidFileChecks
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_paranoidFileChecks(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_paranoidFileChecks(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->paranoid_file_checks;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setParanoidFileChecks(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jparanoid_file_checks) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jparanoid_file_checks) {
reinterpret_cast<rocksdb::Options*>(jhandle)->paranoid_file_checks =
static_cast<bool>(jparanoid_file_checks);
}
* Signature: (JB)V
*/
void Java_org_rocksdb_Options_setCompactionPriority(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jbyte jcompaction_priority_value) {
+ JNIEnv*, jobject, jlong jhandle, jbyte jcompaction_priority_value) {
auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
opts->compaction_pri =
rocksdb::CompactionPriorityJni::toCppCompactionPriority(
* Method: compactionPriority
* Signature: (J)B
*/
-jbyte Java_org_rocksdb_Options_compactionPriority(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jbyte Java_org_rocksdb_Options_compactionPriority(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
return rocksdb::CompactionPriorityJni::toJavaCompactionPriority(
opts->compaction_pri);
* Method: setReportBgIoStats
* Signature: (JZ)V
*/
-void Java_org_rocksdb_Options_setReportBgIoStats(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean jreport_bg_io_stats) {
+void Java_org_rocksdb_Options_setReportBgIoStats(
+ JNIEnv*, jobject, jlong jhandle, jboolean jreport_bg_io_stats) {
auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
opts->report_bg_io_stats = static_cast<bool>(jreport_bg_io_stats);
}
* Method: reportBgIoStats
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_reportBgIoStats(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_reportBgIoStats(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<bool>(opts->report_bg_io_stats);
}
+/*
+ * Class: org_rocksdb_Options
+ * Method: setTtl
+ * Signature: (JJ)V
+ */
+// Stores the Java long into Options::ttl. NOTE(review): jttl is reinterpreted
+// as unsigned (uint64_t), so a negative Java value becomes a very large TTL —
+// presumably callers only pass non-negative values; confirm on the Java side.
+void Java_org_rocksdb_Options_setTtl(
+    JNIEnv*, jobject, jlong jhandle, jlong jttl) {
+  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
+  opts->ttl = static_cast<uint64_t>(jttl);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: ttl
+ * Signature: (J)J
+ */
+// Returns Options::ttl as a Java long. NOTE(review): a ttl above INT64_MAX
+// would wrap negative in this cast — assumed not to occur in practice.
+jlong Java_org_rocksdb_Options_ttl(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return static_cast<jlong>(opts->ttl);
+}
+
/*
* Class: org_rocksdb_Options
* Method: setCompactionOptionsUniversal
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setCompactionOptionsUniversal(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jlong jcompaction_options_universal_handle) {
auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
auto* opts_uni = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(
* Signature: (JJ)V
*/
void Java_org_rocksdb_Options_setCompactionOptionsFIFO(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jcompaction_options_fifo_handle) {
+ JNIEnv*, jobject, jlong jhandle, jlong jcompaction_options_fifo_handle) {
auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
auto* opts_fifo = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(
jcompaction_options_fifo_handle);
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setForceConsistencyChecks(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jforce_consistency_checks) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jforce_consistency_checks) {
auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
opts->force_consistency_checks = static_cast<bool>(jforce_consistency_checks);
}
* Method: forceConsistencyChecks
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_Options_forceConsistencyChecks(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_Options_forceConsistencyChecks(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opts = reinterpret_cast<rocksdb::Options*>(jhandle);
return static_cast<bool>(opts->force_consistency_checks);
}
* Signature: ()J
*/
jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptions(
- JNIEnv* /*env*/, jclass /*jcls*/) {
+ JNIEnv*, jclass) {
auto* op = new rocksdb::ColumnFamilyOptions();
return reinterpret_cast<jlong>(op);
}
* Signature: (J)J
*/
jlong Java_org_rocksdb_ColumnFamilyOptions_copyColumnFamilyOptions(
- JNIEnv* /*env*/, jclass /*jcls*/, jlong jhandle) {
+ JNIEnv*, jclass, jlong jhandle) {
auto new_opt = new rocksdb::ColumnFamilyOptions(
*(reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)));
return reinterpret_cast<jlong>(new_opt);
}
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: newColumnFamilyOptionsFromOptions
+ * Signature: (J)J
+ */
+// Copy-constructs a ColumnFamilyOptions from an existing Options handle
+// (relies on rocksdb::Options being convertible to ColumnFamilyOptions via
+// its copy constructor). The returned jlong is a handle to a heap allocation
+// whose ownership passes to the Java object — presumably released through
+// disposeInternal, as with the other constructors in this file.
+jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptionsFromOptions(
+    JNIEnv*, jclass, jlong joptions_handle) {
+  auto new_opt = new rocksdb::ColumnFamilyOptions(
+      *reinterpret_cast<rocksdb::Options*>(joptions_handle));
+  return reinterpret_cast<jlong>(new_opt);
+}
+
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: getColumnFamilyOptionsFromProps
* Signature: (Ljava/util/String;)J
*/
jlong Java_org_rocksdb_ColumnFamilyOptions_getColumnFamilyOptionsFromProps(
- JNIEnv* env, jclass /*jclazz*/, jstring jopt_string) {
+ JNIEnv* env, jclass, jstring jopt_string) {
const char* opt_string = env->GetStringUTFChars(jopt_string, nullptr);
if (opt_string == nullptr) {
// exception thrown: OutOfMemoryError
* Method: disposeInternal
* Signature: (J)V
*/
-void Java_org_rocksdb_ColumnFamilyOptions_disposeInternal(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong handle) {
+void Java_org_rocksdb_ColumnFamilyOptions_disposeInternal(
+ JNIEnv*, jobject, jlong handle) {
auto* cfo = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(handle);
assert(cfo != nullptr);
delete cfo;
* Method: optimizeForSmallDb
* Signature: (J)V
*/
-void Java_org_rocksdb_ColumnFamilyOptions_optimizeForSmallDb(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+void Java_org_rocksdb_ColumnFamilyOptions_optimizeForSmallDb(
+ JNIEnv*, jobject, jlong jhandle) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->OptimizeForSmallDb();
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_optimizeForPointLookup(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong block_cache_size_mb) {
+ JNIEnv*, jobject, jlong jhandle, jlong block_cache_size_mb) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->OptimizeForPointLookup(block_cache_size_mb);
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_optimizeLevelStyleCompaction(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong memtable_memory_budget) {
+ JNIEnv*, jobject, jlong jhandle, jlong memtable_memory_budget) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->OptimizeLevelStyleCompaction(memtable_memory_budget);
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_optimizeUniversalStyleCompaction(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong memtable_memory_budget) {
+ JNIEnv*, jobject, jlong jhandle, jlong memtable_memory_budget) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->OptimizeUniversalStyleCompaction(memtable_memory_budget);
}
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setComparatorHandle__JI(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint builtinComparator) {
+ JNIEnv*, jobject, jlong jhandle, jint builtinComparator) {
switch (builtinComparator) {
case 1:
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->comparator =
* Signature: (JJB)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setComparatorHandle__JJB(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jopt_handle,
- jlong jcomparator_handle, jbyte jcomparator_type) {
+ JNIEnv*, jobject, jlong jopt_handle, jlong jcomparator_handle,
+ jbyte jcomparator_type) {
rocksdb::Comparator* comparator = nullptr;
switch (jcomparator_type) {
// JAVA_COMPARATOR
* Signature: (JJjava/lang/String)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMergeOperatorName(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle, jstring jop_name) {
+ JNIEnv* env, jobject, jlong jhandle, jstring jop_name) {
auto* options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
const char* op_name = env->GetStringUTFChars(jop_name, nullptr);
if (op_name == nullptr) {
* Signature: (JJjava/lang/String)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMergeOperator(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong mergeOperatorHandle) {
+ JNIEnv*, jobject, jlong jhandle, jlong mergeOperatorHandle) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->merge_operator =
*(reinterpret_cast<std::shared_ptr<rocksdb::MergeOperator>*>(
mergeOperatorHandle));
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setCompactionFilterHandle(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jopt_handle,
- jlong jcompactionfilter_handle) {
+ JNIEnv*, jobject, jlong jopt_handle, jlong jcompactionfilter_handle) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jopt_handle)
->compaction_filter =
reinterpret_cast<rocksdb::CompactionFilter*>(jcompactionfilter_handle);
* Method: setCompactionFilterFactoryHandle
* Signature: (JJ)V
*/
-void JNICALL
-Java_org_rocksdb_ColumnFamilyOptions_setCompactionFilterFactoryHandle(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jopt_handle,
+void Java_org_rocksdb_ColumnFamilyOptions_setCompactionFilterFactoryHandle(
+ JNIEnv*, jobject, jlong jopt_handle,
jlong jcompactionfilterfactory_handle) {
auto* cff_factory =
reinterpret_cast<std::shared_ptr<rocksdb::CompactionFilterFactoryJniCallback>*>(
* Signature: (JJ)I
*/
void Java_org_rocksdb_ColumnFamilyOptions_setWriteBufferSize(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle,
- jlong jwrite_buffer_size) {
- rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jwrite_buffer_size);
+ JNIEnv* env, jobject, jlong jhandle, jlong jwrite_buffer_size) {
+ auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(jwrite_buffer_size);
if (s.ok()) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->write_buffer_size = jwrite_buffer_size;
* Method: writeBufferSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_ColumnFamilyOptions_writeBufferSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_ColumnFamilyOptions_writeBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->write_buffer_size;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMaxWriteBufferNumber(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint jmax_write_buffer_number) {
+ JNIEnv*, jobject, jlong jhandle, jint jmax_write_buffer_number) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->max_write_buffer_number = jmax_write_buffer_number;
}
* Method: maxWriteBufferNumber
* Signature: (J)I
*/
-jint Java_org_rocksdb_ColumnFamilyOptions_maxWriteBufferNumber(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_ColumnFamilyOptions_maxWriteBufferNumber(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->max_write_buffer_number;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMemTableFactory(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jlong jfactory_handle) {
+ JNIEnv*, jobject, jlong jhandle, jlong jfactory_handle) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->memtable_factory.reset(
reinterpret_cast<rocksdb::MemTableRepFactory*>(jfactory_handle));
* Signature: (J)Ljava/lang/String
*/
jstring Java_org_rocksdb_ColumnFamilyOptions_memTableFactoryName(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv* env, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
rocksdb::MemTableRepFactory* tf = opt->memtable_factory.get();
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_useFixedLengthPrefixExtractor(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint jprefix_length) {
+ JNIEnv*, jobject, jlong jhandle, jint jprefix_length) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->prefix_extractor.reset(
rocksdb::NewFixedPrefixTransform(static_cast<int>(jprefix_length)));
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_useCappedPrefixExtractor(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint jprefix_length) {
+ JNIEnv*, jobject, jlong jhandle, jint jprefix_length) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->prefix_extractor.reset(
rocksdb::NewCappedPrefixTransform(static_cast<int>(jprefix_length)));
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setTableFactory(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jlong jfactory_handle) {
+ JNIEnv*, jobject, jlong jhandle, jlong jfactory_handle) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->table_factory.reset(
reinterpret_cast<rocksdb::TableFactory*>(jfactory_handle));
}
* Method: tableFactoryName
* Signature: (J)Ljava/lang/String
*/
-jstring Java_org_rocksdb_ColumnFamilyOptions_tableFactoryName(JNIEnv* env,
- jobject /*jobj*/,
- jlong jhandle) {
+jstring Java_org_rocksdb_ColumnFamilyOptions_tableFactoryName(
+ JNIEnv* env, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
rocksdb::TableFactory* tf = opt->table_factory.get();
* Signature: (J)I
*/
jint Java_org_rocksdb_ColumnFamilyOptions_minWriteBufferNumberToMerge(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->min_write_buffer_number_to_merge;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMinWriteBufferNumberToMerge(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint jmin_write_buffer_number_to_merge) {
+ JNIEnv*, jobject, jlong jhandle, jint jmin_write_buffer_number_to_merge) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->min_write_buffer_number_to_merge =
static_cast<int>(jmin_write_buffer_number_to_merge);
* Signature: (J)I
*/
jint Java_org_rocksdb_ColumnFamilyOptions_maxWriteBufferNumberToMaintain(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->max_write_buffer_number_to_maintain;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMaxWriteBufferNumberToMaintain(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jint jmax_write_buffer_number_to_maintain) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->max_write_buffer_number_to_maintain =
* Signature: (JB)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setCompressionType(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jbyte jcompression_type_value) {
+ JNIEnv*, jobject, jlong jhandle, jbyte jcompression_type_value) {
auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
cf_opts->compression = rocksdb::CompressionTypeJni::toCppCompressionType(
jcompression_type_value);
* Method: compressionType
* Signature: (J)B
*/
-jbyte Java_org_rocksdb_ColumnFamilyOptions_compressionType(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jbyte Java_org_rocksdb_ColumnFamilyOptions_compressionType(
+ JNIEnv*, jobject, jlong jhandle) {
auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
return rocksdb::CompressionTypeJni::toJavaCompressionType(
cf_opts->compression);
* Signature: (J[B)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setCompressionPerLevel(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle,
- jbyteArray jcompressionLevels) {
+ JNIEnv* env, jobject, jlong jhandle, jbyteArray jcompressionLevels) {
auto* options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
auto uptr_compression_levels =
rocksdb_compression_vector_helper(env, jcompressionLevels);
* Signature: (J)[B
*/
jbyteArray Java_org_rocksdb_ColumnFamilyOptions_compressionPerLevel(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv* env, jobject, jlong jhandle) {
auto* cf_options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
return rocksdb_compression_list_helper(env,
cf_options->compression_per_level);
* Signature: (JB)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setBottommostCompressionType(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jbyte jcompression_type_value) {
+ JNIEnv*, jobject, jlong jhandle, jbyte jcompression_type_value) {
auto* cf_options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
cf_options->bottommost_compression =
rocksdb::CompressionTypeJni::toCppCompressionType(
* Signature: (J)B
*/
jbyte Java_org_rocksdb_ColumnFamilyOptions_bottommostCompressionType(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
auto* cf_options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
return rocksdb::CompressionTypeJni::toJavaCompressionType(
cf_options->bottommost_compression);
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setBottommostCompressionOptions(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jlong jbottommost_compression_options_handle) {
auto* cf_options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
auto* bottommost_compression_options =
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setCompressionOptions(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jcompression_options_handle) {
+ JNIEnv*, jobject, jlong jhandle, jlong jcompression_options_handle) {
auto* cf_options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
auto* compression_options = reinterpret_cast<rocksdb::CompressionOptions*>(
jcompression_options_handle);
* Signature: (JB)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setCompactionStyle(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jbyte compaction_style) {
- reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->compaction_style =
- static_cast<rocksdb::CompactionStyle>(compaction_style);
+ JNIEnv*, jobject, jlong jhandle, jbyte jcompaction_style) {
+ auto* cf_options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+ cf_options->compaction_style =
+ rocksdb::CompactionStyleJni::toCppCompactionStyle(jcompaction_style);
}
/*
* Method: compactionStyle
* Signature: (J)B
*/
-jbyte Java_org_rocksdb_ColumnFamilyOptions_compactionStyle(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
- return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
- ->compaction_style;
+jbyte Java_org_rocksdb_ColumnFamilyOptions_compactionStyle(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* cf_options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+ return rocksdb::CompactionStyleJni::toJavaCompactionStyle(
+ cf_options->compaction_style);
}
/*
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMaxTableFilesSizeFIFO(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jmax_table_files_size) {
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_table_files_size) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->compaction_options_fifo.max_table_files_size =
static_cast<uint64_t>(jmax_table_files_size);
* Signature: (J)J
*/
jlong Java_org_rocksdb_ColumnFamilyOptions_maxTableFilesSizeFIFO(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->compaction_options_fifo.max_table_files_size;
}
* Method: numLevels
* Signature: (J)I
*/
-jint Java_org_rocksdb_ColumnFamilyOptions_numLevels(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_ColumnFamilyOptions_numLevels(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->num_levels;
}
* Method: setNumLevels
* Signature: (JI)V
*/
-void Java_org_rocksdb_ColumnFamilyOptions_setNumLevels(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jint jnum_levels) {
+void Java_org_rocksdb_ColumnFamilyOptions_setNumLevels(
+ JNIEnv*, jobject, jlong jhandle, jint jnum_levels) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->num_levels =
static_cast<int>(jnum_levels);
}
* Signature: (J)I
*/
jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroFileNumCompactionTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->level0_file_num_compaction_trigger;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroFileNumCompactionTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jint jlevel0_file_num_compaction_trigger) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->level0_file_num_compaction_trigger =
* Signature: (J)I
*/
jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroSlowdownWritesTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->level0_slowdown_writes_trigger;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroSlowdownWritesTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint jlevel0_slowdown_writes_trigger) {
+ JNIEnv*, jobject, jlong jhandle, jint jlevel0_slowdown_writes_trigger) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->level0_slowdown_writes_trigger =
static_cast<int>(jlevel0_slowdown_writes_trigger);
* Signature: (J)I
*/
jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroStopWritesTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->level0_stop_writes_trigger;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroStopWritesTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint jlevel0_stop_writes_trigger) {
+ JNIEnv*, jobject, jlong jhandle, jint jlevel0_stop_writes_trigger) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->level0_stop_writes_trigger =
static_cast<int>(jlevel0_stop_writes_trigger);
* Method: targetFileSizeBase
* Signature: (J)J
*/
-jlong Java_org_rocksdb_ColumnFamilyOptions_targetFileSizeBase(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_ColumnFamilyOptions_targetFileSizeBase(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->target_file_size_base;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setTargetFileSizeBase(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jtarget_file_size_base) {
+ JNIEnv*, jobject, jlong jhandle, jlong jtarget_file_size_base) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->target_file_size_base = static_cast<uint64_t>(jtarget_file_size_base);
}
* Signature: (J)I
*/
jint Java_org_rocksdb_ColumnFamilyOptions_targetFileSizeMultiplier(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->target_file_size_multiplier;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setTargetFileSizeMultiplier(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint jtarget_file_size_multiplier) {
+ JNIEnv*, jobject, jlong jhandle, jint jtarget_file_size_multiplier) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->target_file_size_multiplier =
static_cast<int>(jtarget_file_size_multiplier);
* Signature: (J)J
*/
jlong Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelBase(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->max_bytes_for_level_base;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelBase(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jmax_bytes_for_level_base) {
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_bytes_for_level_base) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->max_bytes_for_level_base =
static_cast<int64_t>(jmax_bytes_for_level_base);
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_ColumnFamilyOptions_levelCompactionDynamicLevelBytes(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->level_compaction_dynamic_level_bytes;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setLevelCompactionDynamicLevelBytes(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jenable_dynamic_level_bytes) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jenable_dynamic_level_bytes) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->level_compaction_dynamic_level_bytes = (jenable_dynamic_level_bytes);
}
* Signature: (J)D
*/
jdouble Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelMultiplier(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->max_bytes_for_level_multiplier;
}
* Signature: (JD)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelMultiplier(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jdouble jmax_bytes_for_level_multiplier) {
+ JNIEnv*, jobject, jlong jhandle, jdouble jmax_bytes_for_level_multiplier) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->max_bytes_for_level_multiplier =
static_cast<double>(jmax_bytes_for_level_multiplier);
* Method: maxCompactionBytes
* Signature: (J)I
*/
-jlong Java_org_rocksdb_ColumnFamilyOptions_maxCompactionBytes(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_ColumnFamilyOptions_maxCompactionBytes(
+ JNIEnv*, jobject, jlong jhandle) {
return static_cast<jlong>(
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->max_compaction_bytes);
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMaxCompactionBytes(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jmax_compaction_bytes) {
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_compaction_bytes) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->max_compaction_bytes = static_cast<uint64_t>(jmax_compaction_bytes);
}
* Method: arenaBlockSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_ColumnFamilyOptions_arenaBlockSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_ColumnFamilyOptions_arenaBlockSize(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->arena_block_size;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setArenaBlockSize(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jarena_block_size) {
- rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jarena_block_size);
+ JNIEnv* env, jobject, jlong jhandle, jlong jarena_block_size) {
+ auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(jarena_block_size);
if (s.ok()) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->arena_block_size =
jarena_block_size;
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_ColumnFamilyOptions_disableAutoCompactions(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->disable_auto_compactions;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setDisableAutoCompactions(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jdisable_auto_compactions) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jdisable_auto_compactions) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->disable_auto_compactions = static_cast<bool>(jdisable_auto_compactions);
}
* Signature: (J)J
*/
jlong Java_org_rocksdb_ColumnFamilyOptions_maxSequentialSkipInIterations(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->max_sequential_skip_in_iterations;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMaxSequentialSkipInIterations(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jlong jmax_sequential_skip_in_iterations) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->max_sequential_skip_in_iterations =
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_ColumnFamilyOptions_inplaceUpdateSupport(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->inplace_update_support;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setInplaceUpdateSupport(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jinplace_update_support) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jinplace_update_support) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->inplace_update_support = static_cast<bool>(jinplace_update_support);
}
* Signature: (J)J
*/
jlong Java_org_rocksdb_ColumnFamilyOptions_inplaceUpdateNumLocks(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->inplace_update_num_locks;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setInplaceUpdateNumLocks(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle,
- jlong jinplace_update_num_locks) {
- rocksdb::Status s =
- rocksdb::check_if_jlong_fits_size_t(jinplace_update_num_locks);
+ JNIEnv* env, jobject, jlong jhandle, jlong jinplace_update_num_locks) {
+ auto s =
+ rocksdb::JniUtil::check_if_jlong_fits_size_t(jinplace_update_num_locks);
if (s.ok()) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->inplace_update_num_locks = jinplace_update_num_locks;
* Signature: (J)I
*/
jdouble Java_org_rocksdb_ColumnFamilyOptions_memtablePrefixBloomSizeRatio(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->memtable_prefix_bloom_size_ratio;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMemtablePrefixBloomSizeRatio(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jdouble jmemtable_prefix_bloom_size_ratio) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->memtable_prefix_bloom_size_ratio =
* Method: bloomLocality
* Signature: (J)I
*/
-jint Java_org_rocksdb_ColumnFamilyOptions_bloomLocality(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_ColumnFamilyOptions_bloomLocality(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->bloom_locality;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setBloomLocality(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint jbloom_locality) {
+ JNIEnv*, jobject, jlong jhandle, jint jbloom_locality) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->bloom_locality =
static_cast<int32_t>(jbloom_locality);
}
* Method: maxSuccessiveMerges
* Signature: (J)J
*/
-jlong Java_org_rocksdb_ColumnFamilyOptions_maxSuccessiveMerges(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_ColumnFamilyOptions_maxSuccessiveMerges(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->max_successive_merges;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMaxSuccessiveMerges(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle,
- jlong jmax_successive_merges) {
- rocksdb::Status s =
- rocksdb::check_if_jlong_fits_size_t(jmax_successive_merges);
+ JNIEnv* env, jobject, jlong jhandle, jlong jmax_successive_merges) {
+ auto s =
+ rocksdb::JniUtil::check_if_jlong_fits_size_t(jmax_successive_merges);
if (s.ok()) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->max_successive_merges = jmax_successive_merges;
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_ColumnFamilyOptions_optimizeFiltersForHits(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->optimize_filters_for_hits;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setOptimizeFiltersForHits(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean joptimize_filters_for_hits) {
+ JNIEnv*, jobject, jlong jhandle, jboolean joptimize_filters_for_hits) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->optimize_filters_for_hits =
static_cast<bool>(joptimize_filters_for_hits);
* Signature: (J)J
*/
jlong Java_org_rocksdb_ColumnFamilyOptions_memtableHugePageSize(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->memtable_huge_page_size;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMemtableHugePageSize(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle,
- jlong jmemtable_huge_page_size) {
- rocksdb::Status s =
- rocksdb::check_if_jlong_fits_size_t(jmemtable_huge_page_size);
+ JNIEnv* env, jobject, jlong jhandle, jlong jmemtable_huge_page_size) {
+ auto s =
+ rocksdb::JniUtil::check_if_jlong_fits_size_t(jmemtable_huge_page_size);
if (s.ok()) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->memtable_huge_page_size = jmemtable_huge_page_size;
* Signature: (J)J
*/
jlong Java_org_rocksdb_ColumnFamilyOptions_softPendingCompactionBytesLimit(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->soft_pending_compaction_bytes_limit;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setSoftPendingCompactionBytesLimit(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jlong jsoft_pending_compaction_bytes_limit) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->soft_pending_compaction_bytes_limit =
* Signature: (J)J
*/
jlong Java_org_rocksdb_ColumnFamilyOptions_hardPendingCompactionBytesLimit(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->hard_pending_compaction_bytes_limit;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setHardPendingCompactionBytesLimit(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jlong jhard_pending_compaction_bytes_limit) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->hard_pending_compaction_bytes_limit =
* Signature: (J)I
*/
jint Java_org_rocksdb_ColumnFamilyOptions_level0FileNumCompactionTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->level0_file_num_compaction_trigger;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setLevel0FileNumCompactionTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jint jlevel0_file_num_compaction_trigger) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->level0_file_num_compaction_trigger =
* Signature: (J)I
*/
jint Java_org_rocksdb_ColumnFamilyOptions_level0SlowdownWritesTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->level0_slowdown_writes_trigger;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setLevel0SlowdownWritesTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint jlevel0_slowdown_writes_trigger) {
+ JNIEnv*, jobject, jlong jhandle, jint jlevel0_slowdown_writes_trigger) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->level0_slowdown_writes_trigger =
static_cast<int32_t>(jlevel0_slowdown_writes_trigger);
* Signature: (J)I
*/
jint Java_org_rocksdb_ColumnFamilyOptions_level0StopWritesTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->level0_stop_writes_trigger;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setLevel0StopWritesTrigger(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint jlevel0_stop_writes_trigger) {
+ JNIEnv*, jobject, jlong jhandle, jint jlevel0_stop_writes_trigger) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->level0_stop_writes_trigger =
static_cast<int32_t>(jlevel0_stop_writes_trigger);
* Method: maxBytesForLevelMultiplierAdditional
* Signature: (J)[I
*/
-jintArray
-Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelMultiplierAdditional(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle) {
+jintArray Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelMultiplierAdditional(
+ JNIEnv* env, jobject, jlong jhandle) {
auto mbflma = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->max_bytes_for_level_multiplier_additional;
* Signature: (J[I)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelMultiplierAdditional(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle,
+ JNIEnv* env, jobject, jlong jhandle,
jintArray jmax_bytes_for_level_multiplier_additional) {
jsize len = env->GetArrayLength(jmax_bytes_for_level_multiplier_additional);
jint* additionals =
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_ColumnFamilyOptions_paranoidFileChecks(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->paranoid_file_checks;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setParanoidFileChecks(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jparanoid_file_checks) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jparanoid_file_checks) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->paranoid_file_checks = static_cast<bool>(jparanoid_file_checks);
}
* Signature: (JB)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setCompactionPriority(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jbyte jcompaction_priority_value) {
+ JNIEnv*, jobject, jlong jhandle, jbyte jcompaction_priority_value) {
auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
cf_opts->compaction_pri =
rocksdb::CompactionPriorityJni::toCppCompactionPriority(
* Method: compactionPriority
* Signature: (J)B
*/
-jbyte Java_org_rocksdb_ColumnFamilyOptions_compactionPriority(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jbyte Java_org_rocksdb_ColumnFamilyOptions_compactionPriority(
+ JNIEnv*, jobject, jlong jhandle) {
auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
return rocksdb::CompactionPriorityJni::toJavaCompactionPriority(
cf_opts->compaction_pri);
* Signature: (JZ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setReportBgIoStats(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jreport_bg_io_stats) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jreport_bg_io_stats) {
auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
cf_opts->report_bg_io_stats = static_cast<bool>(jreport_bg_io_stats);
}
* Method: reportBgIoStats
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_ColumnFamilyOptions_reportBgIoStats(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_ColumnFamilyOptions_reportBgIoStats(
+ JNIEnv*, jobject, jlong jhandle) {
auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
return static_cast<bool>(cf_opts->report_bg_io_stats);
}
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setTtl
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setTtl(
+ JNIEnv*, jobject, jlong jhandle, jlong jttl) {
+ auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+ cf_opts->ttl = static_cast<uint64_t>(jttl);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: ttl
+ * Signature: (J)J
+ */
+JNIEXPORT jlong JNICALL Java_org_rocksdb_ColumnFamilyOptions_ttl(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+ return static_cast<jlong>(cf_opts->ttl);
+}
+
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: setCompactionOptionsUniversal
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setCompactionOptionsUniversal(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jlong jcompaction_options_universal_handle) {
auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
auto* opts_uni = reinterpret_cast<rocksdb::CompactionOptionsUniversal*>(
* Signature: (JJ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setCompactionOptionsFIFO(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jcompaction_options_fifo_handle) {
+ JNIEnv*, jobject, jlong jhandle, jlong jcompaction_options_fifo_handle) {
auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
auto* opts_fifo = reinterpret_cast<rocksdb::CompactionOptionsFIFO*>(
jcompaction_options_fifo_handle);
* Signature: (JZ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setForceConsistencyChecks(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jforce_consistency_checks) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jforce_consistency_checks) {
auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
cf_opts->force_consistency_checks =
static_cast<bool>(jforce_consistency_checks);
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_ColumnFamilyOptions_forceConsistencyChecks(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
auto* cf_opts = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
return static_cast<bool>(cf_opts->force_consistency_checks);
}
* Method: newDBOptions
* Signature: ()J
*/
-jlong Java_org_rocksdb_DBOptions_newDBOptions(JNIEnv* /*env*/, jclass /*jcls*/) {
+jlong Java_org_rocksdb_DBOptions_newDBOptions(
+ JNIEnv*, jclass) {
auto* dbop = new rocksdb::DBOptions();
return reinterpret_cast<jlong>(dbop);
}
* Method: copyDBOptions
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_copyDBOptions(JNIEnv* /*env*/, jclass /*jcls*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_copyDBOptions(
+ JNIEnv*, jclass, jlong jhandle) {
auto new_opt =
new rocksdb::DBOptions(*(reinterpret_cast<rocksdb::DBOptions*>(jhandle)));
return reinterpret_cast<jlong>(new_opt);
}
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: newDBOptionsFromOptions
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_newDBOptionsFromOptions(
+ JNIEnv*, jclass, jlong joptions_handle) {
+ auto new_opt =
+ new rocksdb::DBOptions(*reinterpret_cast<rocksdb::Options*>(joptions_handle));
+ return reinterpret_cast<jlong>(new_opt);
+}
+
/*
* Class: org_rocksdb_DBOptions
* Method: getDBOptionsFromProps
 * Signature: (Ljava/lang/String;)J
*/
-jlong Java_org_rocksdb_DBOptions_getDBOptionsFromProps(JNIEnv* env,
- jclass /*jclazz*/,
- jstring jopt_string) {
+jlong Java_org_rocksdb_DBOptions_getDBOptionsFromProps(
+ JNIEnv* env, jclass, jstring jopt_string) {
const char* opt_string = env->GetStringUTFChars(jopt_string, nullptr);
if (opt_string == nullptr) {
// exception thrown: OutOfMemoryError
* Method: disposeInternal
* Signature: (J)V
*/
-void Java_org_rocksdb_DBOptions_disposeInternal(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong handle) {
+void Java_org_rocksdb_DBOptions_disposeInternal(
+ JNIEnv*, jobject, jlong handle) {
auto* dbo = reinterpret_cast<rocksdb::DBOptions*>(handle);
assert(dbo != nullptr);
delete dbo;
* Method: optimizeForSmallDb
* Signature: (J)V
*/
-void Java_org_rocksdb_DBOptions_optimizeForSmallDb(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+void Java_org_rocksdb_DBOptions_optimizeForSmallDb(
+ JNIEnv*, jobject, jlong jhandle) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->OptimizeForSmallDb();
}
* Method: setEnv
* Signature: (JJ)V
*/
-void Java_org_rocksdb_DBOptions_setEnv(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle, jlong jenv_handle) {
+void Java_org_rocksdb_DBOptions_setEnv(
+ JNIEnv*, jobject, jlong jhandle, jlong jenv_handle) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->env =
reinterpret_cast<rocksdb::Env*>(jenv_handle);
}
* Method: setIncreaseParallelism
* Signature: (JI)V
*/
-void Java_org_rocksdb_DBOptions_setIncreaseParallelism(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jint totalThreads) {
+void Java_org_rocksdb_DBOptions_setIncreaseParallelism(
+ JNIEnv*, jobject, jlong jhandle, jint totalThreads) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->IncreaseParallelism(
static_cast<int>(totalThreads));
}
* Method: setCreateIfMissing
* Signature: (JZ)V
*/
-void Java_org_rocksdb_DBOptions_setCreateIfMissing(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean flag) {
+void Java_org_rocksdb_DBOptions_setCreateIfMissing(
+ JNIEnv*, jobject, jlong jhandle, jboolean flag) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->create_if_missing = flag;
}
* Method: createIfMissing
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_DBOptions_createIfMissing(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_createIfMissing(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->create_if_missing;
}
* Method: setCreateMissingColumnFamilies
* Signature: (JZ)V
*/
-void Java_org_rocksdb_DBOptions_setCreateMissingColumnFamilies(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean flag) {
+void Java_org_rocksdb_DBOptions_setCreateMissingColumnFamilies(
+ JNIEnv*, jobject, jlong jhandle, jboolean flag) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)
->create_missing_column_families = flag;
}
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_DBOptions_createMissingColumnFamilies(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)
->create_missing_column_families;
}
* Method: setErrorIfExists
* Signature: (JZ)V
*/
-void Java_org_rocksdb_DBOptions_setErrorIfExists(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean error_if_exists) {
+void Java_org_rocksdb_DBOptions_setErrorIfExists(
+ JNIEnv*, jobject, jlong jhandle, jboolean error_if_exists) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->error_if_exists =
static_cast<bool>(error_if_exists);
}
* Method: errorIfExists
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_DBOptions_errorIfExists(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_errorIfExists(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->error_if_exists;
}
* Method: setParanoidChecks
* Signature: (JZ)V
*/
-void Java_org_rocksdb_DBOptions_setParanoidChecks(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean paranoid_checks) {
+void Java_org_rocksdb_DBOptions_setParanoidChecks(
+ JNIEnv*, jobject, jlong jhandle, jboolean paranoid_checks) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->paranoid_checks =
static_cast<bool>(paranoid_checks);
}
* Method: paranoidChecks
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_DBOptions_paranoidChecks(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_paranoidChecks(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->paranoid_checks;
}
* Method: setRateLimiter
* Signature: (JJ)V
*/
-void Java_org_rocksdb_DBOptions_setRateLimiter(JNIEnv* /*env*/,
- jobject /*jobj*/, jlong jhandle,
- jlong jrate_limiter_handle) {
+void Java_org_rocksdb_DBOptions_setRateLimiter(
+ JNIEnv*, jobject, jlong jhandle, jlong jrate_limiter_handle) {
std::shared_ptr<rocksdb::RateLimiter>* pRateLimiter =
reinterpret_cast<std::shared_ptr<rocksdb::RateLimiter>*>(
jrate_limiter_handle);
* Signature: (JJ)V
*/
void Java_org_rocksdb_DBOptions_setSstFileManager(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jsst_file_manager_handle) {
+ JNIEnv*, jobject, jlong jhandle, jlong jsst_file_manager_handle) {
auto* sptr_sst_file_manager =
reinterpret_cast<std::shared_ptr<rocksdb::SstFileManager>*>(
jsst_file_manager_handle);
* Method: setLogger
* Signature: (JJ)V
*/
-void Java_org_rocksdb_DBOptions_setLogger(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle, jlong jlogger_handle) {
+void Java_org_rocksdb_DBOptions_setLogger(
+ JNIEnv*, jobject, jlong jhandle, jlong jlogger_handle) {
std::shared_ptr<rocksdb::LoggerJniCallback>* pLogger =
reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback>*>(
jlogger_handle);
* Method: setInfoLogLevel
* Signature: (JB)V
*/
-void Java_org_rocksdb_DBOptions_setInfoLogLevel(JNIEnv* /*env*/,
- jobject /*jobj*/, jlong jhandle,
- jbyte jlog_level) {
+void Java_org_rocksdb_DBOptions_setInfoLogLevel(
+ JNIEnv*, jobject, jlong jhandle, jbyte jlog_level) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->info_log_level =
static_cast<rocksdb::InfoLogLevel>(jlog_level);
}
* Method: infoLogLevel
* Signature: (J)B
*/
-jbyte Java_org_rocksdb_DBOptions_infoLogLevel(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jbyte Java_org_rocksdb_DBOptions_infoLogLevel(
+ JNIEnv*, jobject, jlong jhandle) {
return static_cast<jbyte>(
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->info_log_level);
}
* Method: setMaxTotalWalSize
* Signature: (JJ)V
*/
-void Java_org_rocksdb_DBOptions_setMaxTotalWalSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jlong jmax_total_wal_size) {
+void Java_org_rocksdb_DBOptions_setMaxTotalWalSize(
+ JNIEnv*, jobject, jlong jhandle, jlong jmax_total_wal_size) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_total_wal_size =
static_cast<jlong>(jmax_total_wal_size);
}
* Method: maxTotalWalSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_maxTotalWalSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_maxTotalWalSize(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_total_wal_size;
}
* Method: setMaxOpenFiles
* Signature: (JI)V
*/
-void Java_org_rocksdb_DBOptions_setMaxOpenFiles(JNIEnv* /*env*/,
- jobject /*jobj*/, jlong jhandle,
- jint max_open_files) {
+void Java_org_rocksdb_DBOptions_setMaxOpenFiles(
+ JNIEnv*, jobject, jlong jhandle, jint max_open_files) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_open_files =
static_cast<int>(max_open_files);
}
* Method: maxOpenFiles
* Signature: (J)I
*/
-jint Java_org_rocksdb_DBOptions_maxOpenFiles(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_DBOptions_maxOpenFiles(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_open_files;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_DBOptions_setMaxFileOpeningThreads(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint jmax_file_opening_threads) {
+ JNIEnv*, jobject, jlong jhandle, jint jmax_file_opening_threads) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_file_opening_threads =
static_cast<int>(jmax_file_opening_threads);
}
* Method: maxFileOpeningThreads
* Signature: (J)I
*/
-jint Java_org_rocksdb_DBOptions_maxFileOpeningThreads(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_DBOptions_maxFileOpeningThreads(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
return static_cast<int>(opt->max_file_opening_threads);
}
* Method: setStatistics
* Signature: (JJ)V
*/
-void Java_org_rocksdb_DBOptions_setStatistics(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle,
- jlong jstatistics_handle) {
+void Java_org_rocksdb_DBOptions_setStatistics(
+ JNIEnv*, jobject, jlong jhandle, jlong jstatistics_handle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
auto* pSptr = reinterpret_cast<std::shared_ptr<rocksdb::StatisticsJni>*>(
jstatistics_handle);
* Method: statistics
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_statistics(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_statistics(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
std::shared_ptr<rocksdb::Statistics> sptr = opt->statistics;
if (sptr == nullptr) {
* Method: setUseFsync
* Signature: (JZ)V
*/
-void Java_org_rocksdb_DBOptions_setUseFsync(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle, jboolean use_fsync) {
+void Java_org_rocksdb_DBOptions_setUseFsync(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_fsync) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->use_fsync =
static_cast<bool>(use_fsync);
}
* Method: useFsync
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_DBOptions_useFsync(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_useFsync(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->use_fsync;
}
* Method: setDbPaths
* Signature: (J[Ljava/lang/String;[J)V
*/
-void Java_org_rocksdb_DBOptions_setDbPaths(JNIEnv* env, jobject /*jobj*/,
- jlong jhandle, jobjectArray jpaths,
- jlongArray jtarget_sizes) {
+void Java_org_rocksdb_DBOptions_setDbPaths(
+ JNIEnv* env, jobject, jlong jhandle, jobjectArray jpaths,
+ jlongArray jtarget_sizes) {
std::vector<rocksdb::DbPath> db_paths;
jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr);
if (ptr_jtarget_size == nullptr) {
* Method: dbPathsLen
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_dbPathsLen(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_dbPathsLen(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
return static_cast<jlong>(opt->db_paths.size());
}
* Method: dbPaths
* Signature: (J[Ljava/lang/String;[J)V
*/
-void Java_org_rocksdb_DBOptions_dbPaths(JNIEnv* env, jobject /*jobj*/,
- jlong jhandle, jobjectArray jpaths,
- jlongArray jtarget_sizes) {
+void Java_org_rocksdb_DBOptions_dbPaths(
+ JNIEnv* env, jobject, jlong jhandle, jobjectArray jpaths,
+ jlongArray jtarget_sizes) {
jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr);
if (ptr_jtarget_size == nullptr) {
// exception thrown: OutOfMemoryError
* Method: setDbLogDir
 * Signature: (JLjava/lang/String;)V
*/
-void Java_org_rocksdb_DBOptions_setDbLogDir(JNIEnv* env, jobject /*jobj*/,
- jlong jhandle,
- jstring jdb_log_dir) {
+void Java_org_rocksdb_DBOptions_setDbLogDir(
+ JNIEnv* env, jobject, jlong jhandle, jstring jdb_log_dir) {
const char* log_dir = env->GetStringUTFChars(jdb_log_dir, nullptr);
if (log_dir == nullptr) {
// exception thrown: OutOfMemoryError
* Method: dbLogDir
 * Signature: (J)Ljava/lang/String;
*/
-jstring Java_org_rocksdb_DBOptions_dbLogDir(JNIEnv* env, jobject /*jobj*/,
- jlong jhandle) {
+jstring Java_org_rocksdb_DBOptions_dbLogDir(
+ JNIEnv* env, jobject, jlong jhandle) {
return env->NewStringUTF(
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->db_log_dir.c_str());
}
* Method: setWalDir
 * Signature: (JLjava/lang/String;)V
*/
-void Java_org_rocksdb_DBOptions_setWalDir(JNIEnv* env, jobject /*jobj*/,
- jlong jhandle, jstring jwal_dir) {
+void Java_org_rocksdb_DBOptions_setWalDir(
+ JNIEnv* env, jobject, jlong jhandle, jstring jwal_dir) {
const char* wal_dir = env->GetStringUTFChars(jwal_dir, 0);
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->wal_dir.assign(wal_dir);
env->ReleaseStringUTFChars(jwal_dir, wal_dir);
* Method: walDir
 * Signature: (J)Ljava/lang/String;
*/
-jstring Java_org_rocksdb_DBOptions_walDir(JNIEnv* env, jobject /*jobj*/,
- jlong jhandle) {
+jstring Java_org_rocksdb_DBOptions_walDir(
+ JNIEnv* env, jobject, jlong jhandle) {
return env->NewStringUTF(
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->wal_dir.c_str());
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_DBOptions_setDeleteObsoleteFilesPeriodMicros(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jlong micros) {
+ JNIEnv*, jobject, jlong jhandle, jlong micros) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)
->delete_obsolete_files_period_micros = static_cast<int64_t>(micros);
}
* Signature: (J)J
*/
jlong Java_org_rocksdb_DBOptions_deleteObsoleteFilesPeriodMicros(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)
->delete_obsolete_files_period_micros;
}
* Method: setBaseBackgroundCompactions
* Signature: (JI)V
*/
-void Java_org_rocksdb_DBOptions_setBaseBackgroundCompactions(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jint max) {
+void Java_org_rocksdb_DBOptions_setBaseBackgroundCompactions(
+ JNIEnv*, jobject, jlong jhandle, jint max) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->base_background_compactions =
static_cast<int>(max);
}
* Method: baseBackgroundCompactions
* Signature: (J)I
*/
-jint Java_org_rocksdb_DBOptions_baseBackgroundCompactions(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_DBOptions_baseBackgroundCompactions(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)
->base_background_compactions;
}
* Method: setMaxBackgroundCompactions
* Signature: (JI)V
*/
-void Java_org_rocksdb_DBOptions_setMaxBackgroundCompactions(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jint max) {
+void Java_org_rocksdb_DBOptions_setMaxBackgroundCompactions(
+ JNIEnv*, jobject, jlong jhandle, jint max) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_background_compactions =
static_cast<int>(max);
}
* Method: maxBackgroundCompactions
* Signature: (J)I
*/
-jint Java_org_rocksdb_DBOptions_maxBackgroundCompactions(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_DBOptions_maxBackgroundCompactions(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)
->max_background_compactions;
}
* Method: setMaxSubcompactions
* Signature: (JI)V
*/
-void Java_org_rocksdb_DBOptions_setMaxSubcompactions(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle, jint max) {
+void Java_org_rocksdb_DBOptions_setMaxSubcompactions(
+ JNIEnv*, jobject, jlong jhandle, jint max) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_subcompactions =
static_cast<int32_t>(max);
}
* Method: maxSubcompactions
* Signature: (J)I
*/
-jint Java_org_rocksdb_DBOptions_maxSubcompactions(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_DBOptions_maxSubcompactions(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_subcompactions;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_DBOptions_setMaxBackgroundFlushes(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint max_background_flushes) {
+ JNIEnv*, jobject, jlong jhandle, jint max_background_flushes) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_background_flushes =
static_cast<int>(max_background_flushes);
}
* Method: maxBackgroundFlushes
* Signature: (J)I
*/
-jint Java_org_rocksdb_DBOptions_maxBackgroundFlushes(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_DBOptions_maxBackgroundFlushes(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_background_flushes;
}
* Method: setMaxBackgroundJobs
* Signature: (JI)V
*/
-void Java_org_rocksdb_DBOptions_setMaxBackgroundJobs(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jint max_background_jobs) {
+void Java_org_rocksdb_DBOptions_setMaxBackgroundJobs(
+ JNIEnv*, jobject, jlong jhandle, jint max_background_jobs) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_background_jobs =
static_cast<int>(max_background_jobs);
}
* Method: maxBackgroundJobs
* Signature: (J)I
*/
-jint Java_org_rocksdb_DBOptions_maxBackgroundJobs(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_DBOptions_maxBackgroundJobs(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_background_jobs;
}
* Method: setMaxLogFileSize
* Signature: (JJ)V
*/
-void Java_org_rocksdb_DBOptions_setMaxLogFileSize(JNIEnv* env,
- jobject /*jobj*/,
- jlong jhandle,
- jlong max_log_file_size) {
- rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(max_log_file_size);
+void Java_org_rocksdb_DBOptions_setMaxLogFileSize(
+ JNIEnv* env, jobject, jlong jhandle, jlong max_log_file_size) {
+ auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(max_log_file_size);
if (s.ok()) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_log_file_size =
max_log_file_size;
* Method: maxLogFileSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_maxLogFileSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_maxLogFileSize(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_log_file_size;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_DBOptions_setLogFileTimeToRoll(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle,
- jlong log_file_time_to_roll) {
- rocksdb::Status s =
- rocksdb::check_if_jlong_fits_size_t(log_file_time_to_roll);
+ JNIEnv* env, jobject, jlong jhandle, jlong log_file_time_to_roll) {
+ auto s =
+ rocksdb::JniUtil::check_if_jlong_fits_size_t(log_file_time_to_roll);
if (s.ok()) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->log_file_time_to_roll =
log_file_time_to_roll;
* Method: logFileTimeToRoll
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_logFileTimeToRoll(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_logFileTimeToRoll(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->log_file_time_to_roll;
}
* Method: setKeepLogFileNum
* Signature: (JJ)V
*/
-void Java_org_rocksdb_DBOptions_setKeepLogFileNum(JNIEnv* env,
- jobject /*jobj*/,
- jlong jhandle,
- jlong keep_log_file_num) {
- rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(keep_log_file_num);
+void Java_org_rocksdb_DBOptions_setKeepLogFileNum(
+ JNIEnv* env, jobject, jlong jhandle, jlong keep_log_file_num) {
+ auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(keep_log_file_num);
if (s.ok()) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->keep_log_file_num =
keep_log_file_num;
* Method: keepLogFileNum
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_keepLogFileNum(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_keepLogFileNum(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->keep_log_file_num;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_DBOptions_setRecycleLogFileNum(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle,
- jlong recycle_log_file_num) {
- rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(recycle_log_file_num);
+ JNIEnv* env, jobject, jlong jhandle, jlong recycle_log_file_num) {
+ auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(recycle_log_file_num);
if (s.ok()) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->recycle_log_file_num =
recycle_log_file_num;
* Method: recycleLogFileNum
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_recycleLogFileNum(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_recycleLogFileNum(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->recycle_log_file_num;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_DBOptions_setMaxManifestFileSize(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong max_manifest_file_size) {
+ JNIEnv*, jobject, jlong jhandle, jlong max_manifest_file_size) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_manifest_file_size =
static_cast<int64_t>(max_manifest_file_size);
}
* Method: maxManifestFileSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_maxManifestFileSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_maxManifestFileSize(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_manifest_file_size;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_DBOptions_setTableCacheNumshardbits(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint table_cache_numshardbits) {
+ JNIEnv*, jobject, jlong jhandle, jint table_cache_numshardbits) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->table_cache_numshardbits =
static_cast<int>(table_cache_numshardbits);
}
* Method: tableCacheNumshardbits
* Signature: (J)I
*/
-jint Java_org_rocksdb_DBOptions_tableCacheNumshardbits(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_DBOptions_tableCacheNumshardbits(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)
->table_cache_numshardbits;
}
* Method: setWalTtlSeconds
* Signature: (JJ)V
*/
-void Java_org_rocksdb_DBOptions_setWalTtlSeconds(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jlong WAL_ttl_seconds) {
+void Java_org_rocksdb_DBOptions_setWalTtlSeconds(
+ JNIEnv*, jobject, jlong jhandle, jlong WAL_ttl_seconds) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->WAL_ttl_seconds =
static_cast<int64_t>(WAL_ttl_seconds);
}
* Method: walTtlSeconds
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_walTtlSeconds(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_walTtlSeconds(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->WAL_ttl_seconds;
}
* Method: setWalSizeLimitMB
* Signature: (JJ)V
*/
-void Java_org_rocksdb_DBOptions_setWalSizeLimitMB(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jlong WAL_size_limit_MB) {
+void Java_org_rocksdb_DBOptions_setWalSizeLimitMB(
+ JNIEnv*, jobject, jlong jhandle, jlong WAL_size_limit_MB) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->WAL_size_limit_MB =
static_cast<int64_t>(WAL_size_limit_MB);
}
 * Method: walSizeLimitMB
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_walSizeLimitMB(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_walSizeLimitMB(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->WAL_size_limit_MB;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_DBOptions_setManifestPreallocationSize(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle,
- jlong preallocation_size) {
- rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(preallocation_size);
+ JNIEnv* env, jobject, jlong jhandle, jlong preallocation_size) {
+ auto s = rocksdb::JniUtil::check_if_jlong_fits_size_t(preallocation_size);
if (s.ok()) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)
->manifest_preallocation_size = preallocation_size;
* Method: manifestPreallocationSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_manifestPreallocationSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_manifestPreallocationSize(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)
->manifest_preallocation_size;
}
* Method: useDirectReads
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_DBOptions_useDirectReads(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_useDirectReads(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->use_direct_reads;
}
* Method: setUseDirectReads
* Signature: (JZ)V
*/
-void Java_org_rocksdb_DBOptions_setUseDirectReads(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean use_direct_reads) {
+void Java_org_rocksdb_DBOptions_setUseDirectReads(
+ JNIEnv*, jobject, jlong jhandle, jboolean use_direct_reads) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->use_direct_reads =
static_cast<bool>(use_direct_reads);
}
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_DBOptions_useDirectIoForFlushAndCompaction(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)
->use_direct_io_for_flush_and_compaction;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_DBOptions_setUseDirectIoForFlushAndCompaction(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jboolean use_direct_io_for_flush_and_compaction) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)
->use_direct_io_for_flush_and_compaction =
* Method: setAllowFAllocate
* Signature: (JZ)V
*/
-void Java_org_rocksdb_DBOptions_setAllowFAllocate(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean jallow_fallocate) {
+void Java_org_rocksdb_DBOptions_setAllowFAllocate(
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_fallocate) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->allow_fallocate =
static_cast<bool>(jallow_fallocate);
}
* Method: allowFAllocate
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_DBOptions_allowFAllocate(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_allowFAllocate(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
return static_cast<jboolean>(opt->allow_fallocate);
}
* Method: setAllowMmapReads
* Signature: (JZ)V
*/
-void Java_org_rocksdb_DBOptions_setAllowMmapReads(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean allow_mmap_reads) {
+void Java_org_rocksdb_DBOptions_setAllowMmapReads(
+ JNIEnv*, jobject, jlong jhandle, jboolean allow_mmap_reads) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->allow_mmap_reads =
static_cast<bool>(allow_mmap_reads);
}
* Method: allowMmapReads
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_DBOptions_allowMmapReads(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_allowMmapReads(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->allow_mmap_reads;
}
* Method: setAllowMmapWrites
* Signature: (JZ)V
*/
-void Java_org_rocksdb_DBOptions_setAllowMmapWrites(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean allow_mmap_writes) {
+void Java_org_rocksdb_DBOptions_setAllowMmapWrites(
+ JNIEnv*, jobject, jlong jhandle, jboolean allow_mmap_writes) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->allow_mmap_writes =
static_cast<bool>(allow_mmap_writes);
}
* Method: allowMmapWrites
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_DBOptions_allowMmapWrites(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_allowMmapWrites(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->allow_mmap_writes;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_DBOptions_setIsFdCloseOnExec(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean is_fd_close_on_exec) {
+ JNIEnv*, jobject, jlong jhandle, jboolean is_fd_close_on_exec) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->is_fd_close_on_exec =
static_cast<bool>(is_fd_close_on_exec);
}
* Method: isFdCloseOnExec
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_DBOptions_isFdCloseOnExec(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_isFdCloseOnExec(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->is_fd_close_on_exec;
}
* Signature: (JI)V
*/
void Java_org_rocksdb_DBOptions_setStatsDumpPeriodSec(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint stats_dump_period_sec) {
+ JNIEnv*, jobject, jlong jhandle, jint stats_dump_period_sec) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->stats_dump_period_sec =
static_cast<int>(stats_dump_period_sec);
}
* Method: statsDumpPeriodSec
* Signature: (J)I
*/
-jint Java_org_rocksdb_DBOptions_statsDumpPeriodSec(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jint Java_org_rocksdb_DBOptions_statsDumpPeriodSec(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->stats_dump_period_sec;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_DBOptions_setAdviseRandomOnOpen(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean advise_random_on_open) {
+ JNIEnv*, jobject, jlong jhandle, jboolean advise_random_on_open) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->advise_random_on_open =
static_cast<bool>(advise_random_on_open);
}
* Method: adviseRandomOnOpen
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_DBOptions_adviseRandomOnOpen(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_adviseRandomOnOpen(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->advise_random_on_open;
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_DBOptions_setDbWriteBufferSize(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jdb_write_buffer_size) {
+ JNIEnv*, jobject, jlong jhandle, jlong jdb_write_buffer_size) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
opt->db_write_buffer_size = static_cast<size_t>(jdb_write_buffer_size);
}
* Method: setWriteBufferManager
* Signature: (JJ)V
*/
-void Java_org_rocksdb_DBOptions_setWriteBufferManager(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jdb_options_handle,
- jlong jwrite_buffer_manager_handle) {
+void Java_org_rocksdb_DBOptions_setWriteBufferManager(
+ JNIEnv*, jobject, jlong jdb_options_handle,
+ jlong jwrite_buffer_manager_handle) {
auto* write_buffer_manager =
reinterpret_cast<std::shared_ptr<rocksdb::WriteBufferManager> *>(jwrite_buffer_manager_handle);
reinterpret_cast<rocksdb::DBOptions*>(jdb_options_handle)->write_buffer_manager =
* Method: dbWriteBufferSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_dbWriteBufferSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_dbWriteBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
return static_cast<jlong>(opt->db_write_buffer_size);
}
* Signature: (JB)V
*/
void Java_org_rocksdb_DBOptions_setAccessHintOnCompactionStart(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jbyte jaccess_hint_value) {
+ JNIEnv*, jobject, jlong jhandle, jbyte jaccess_hint_value) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
opt->access_hint_on_compaction_start =
rocksdb::AccessHintJni::toCppAccessHint(jaccess_hint_value);
* Method: accessHintOnCompactionStart
* Signature: (J)B
*/
-jbyte Java_org_rocksdb_DBOptions_accessHintOnCompactionStart(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jbyte Java_org_rocksdb_DBOptions_accessHintOnCompactionStart(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
return rocksdb::AccessHintJni::toJavaAccessHint(
opt->access_hint_on_compaction_start);
* Signature: (JZ)V
*/
void Java_org_rocksdb_DBOptions_setNewTableReaderForCompactionInputs(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jboolean jnew_table_reader_for_compaction_inputs) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
opt->new_table_reader_for_compaction_inputs =
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_DBOptions_newTableReaderForCompactionInputs(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
return static_cast<bool>(opt->new_table_reader_for_compaction_inputs);
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_DBOptions_setCompactionReadaheadSize(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jcompaction_readahead_size) {
+ JNIEnv*, jobject, jlong jhandle, jlong jcompaction_readahead_size) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
opt->compaction_readahead_size =
static_cast<size_t>(jcompaction_readahead_size);
* Method: compactionReadaheadSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_compactionReadaheadSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_compactionReadaheadSize(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
return static_cast<jlong>(opt->compaction_readahead_size);
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_DBOptions_setRandomAccessMaxBufferSize(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jrandom_access_max_buffer_size) {
+ JNIEnv*, jobject, jlong jhandle, jlong jrandom_access_max_buffer_size) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
opt->random_access_max_buffer_size =
static_cast<size_t>(jrandom_access_max_buffer_size);
* Method: randomAccessMaxBufferSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_randomAccessMaxBufferSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_randomAccessMaxBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
return static_cast<jlong>(opt->random_access_max_buffer_size);
}
* Signature: (JJ)V
*/
void Java_org_rocksdb_DBOptions_setWritableFileMaxBufferSize(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jwritable_file_max_buffer_size) {
+ JNIEnv*, jobject, jlong jhandle, jlong jwritable_file_max_buffer_size) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
opt->writable_file_max_buffer_size =
static_cast<size_t>(jwritable_file_max_buffer_size);
* Method: writableFileMaxBufferSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_writableFileMaxBufferSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_writableFileMaxBufferSize(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
return static_cast<jlong>(opt->writable_file_max_buffer_size);
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_DBOptions_setUseAdaptiveMutex(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean use_adaptive_mutex) {
+ JNIEnv*, jobject, jlong jhandle, jboolean use_adaptive_mutex) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->use_adaptive_mutex =
static_cast<bool>(use_adaptive_mutex);
}
* Method: useAdaptiveMutex
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_DBOptions_useAdaptiveMutex(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_useAdaptiveMutex(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->use_adaptive_mutex;
}
* Method: setBytesPerSync
* Signature: (JJ)V
*/
-void Java_org_rocksdb_DBOptions_setBytesPerSync(JNIEnv* /*env*/,
- jobject /*jobj*/, jlong jhandle,
- jlong bytes_per_sync) {
+void Java_org_rocksdb_DBOptions_setBytesPerSync(
+ JNIEnv*, jobject, jlong jhandle, jlong bytes_per_sync) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->bytes_per_sync =
static_cast<int64_t>(bytes_per_sync);
}
* Method: bytesPerSync
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_bytesPerSync(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_bytesPerSync(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->bytes_per_sync;
}
* Method: setWalBytesPerSync
* Signature: (JJ)V
*/
-void Java_org_rocksdb_DBOptions_setWalBytesPerSync(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jlong jwal_bytes_per_sync) {
+void Java_org_rocksdb_DBOptions_setWalBytesPerSync(
+ JNIEnv*, jobject, jlong jhandle, jlong jwal_bytes_per_sync) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->wal_bytes_per_sync =
static_cast<int64_t>(jwal_bytes_per_sync);
}
* Method: walBytesPerSync
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_walBytesPerSync(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_walBytesPerSync(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
return static_cast<jlong>(opt->wal_bytes_per_sync);
}
/*
* Class: org_rocksdb_DBOptions
- * Method: setEnableThreadTracking
+ * Method: setDelayedWriteRate
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setDelayedWriteRate(
+ JNIEnv*, jobject, jlong jhandle, jlong jdelayed_write_rate) {
+ auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
+ opt->delayed_write_rate = static_cast<uint64_t>(jdelayed_write_rate);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: delayedWriteRate
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_delayedWriteRate(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
+ return static_cast<jlong>(opt->delayed_write_rate);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setEnablePipelinedWrite
* Signature: (JZ)V
*/
-void Java_org_rocksdb_DBOptions_setEnableThreadTracking(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jenable_thread_tracking) {
+void Java_org_rocksdb_DBOptions_setEnablePipelinedWrite(
+ JNIEnv*, jobject, jlong jhandle, jboolean jenable_pipelined_write) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
- opt->enable_thread_tracking = static_cast<bool>(jenable_thread_tracking);
+ opt->enable_pipelined_write = jenable_pipelined_write == JNI_TRUE;
}
/*
* Class: org_rocksdb_DBOptions
- * Method: enableThreadTracking
+ * Method: enablePipelinedWrite
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_DBOptions_enableThreadTracking(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_enablePipelinedWrite(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
- return static_cast<jboolean>(opt->enable_thread_tracking);
+ return static_cast<jboolean>(opt->enable_pipelined_write);
}
/*
* Class: org_rocksdb_DBOptions
- * Method: setDelayedWriteRate
- * Signature: (JJ)V
+ * Method: setEnableThreadTracking
+ * Signature: (JZ)V
*/
-void Java_org_rocksdb_DBOptions_setDelayedWriteRate(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jlong jdelayed_write_rate) {
+void Java_org_rocksdb_DBOptions_setEnableThreadTracking(
+ JNIEnv*, jobject, jlong jhandle, jboolean jenable_thread_tracking) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
- opt->delayed_write_rate = static_cast<uint64_t>(jdelayed_write_rate);
+ opt->enable_thread_tracking = jenable_thread_tracking == JNI_TRUE;
}
/*
* Class: org_rocksdb_DBOptions
- * Method: delayedWriteRate
- * Signature: (J)J
+ * Method: enableThreadTracking
+ * Signature: (J)Z
*/
-jlong Java_org_rocksdb_DBOptions_delayedWriteRate(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_enableThreadTracking(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
- return static_cast<jlong>(opt->delayed_write_rate);
+ return static_cast<jboolean>(opt->enable_thread_tracking);
}
/*
* Signature: (JZ)V
*/
void Java_org_rocksdb_DBOptions_setAllowConcurrentMemtableWrite(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jboolean allow) {
+ JNIEnv*, jobject, jlong jhandle, jboolean allow) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)
->allow_concurrent_memtable_write = static_cast<bool>(allow);
}
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_DBOptions_allowConcurrentMemtableWrite(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)
->allow_concurrent_memtable_write;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_DBOptions_setEnableWriteThreadAdaptiveYield(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jboolean yield) {
+ JNIEnv*, jobject, jlong jhandle, jboolean yield) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)
->enable_write_thread_adaptive_yield = static_cast<bool>(yield);
}
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_DBOptions_enableWriteThreadAdaptiveYield(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)
->enable_write_thread_adaptive_yield;
}
* Method: setWriteThreadMaxYieldUsec
* Signature: (JJ)V
*/
-void Java_org_rocksdb_DBOptions_setWriteThreadMaxYieldUsec(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jlong max) {
+void Java_org_rocksdb_DBOptions_setWriteThreadMaxYieldUsec(
+ JNIEnv*, jobject, jlong jhandle, jlong max) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->write_thread_max_yield_usec =
static_cast<int64_t>(max);
}
* Method: writeThreadMaxYieldUsec
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_writeThreadMaxYieldUsec(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_writeThreadMaxYieldUsec(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)
->write_thread_max_yield_usec;
}
* Method: setWriteThreadSlowYieldUsec
* Signature: (JJ)V
*/
-void Java_org_rocksdb_DBOptions_setWriteThreadSlowYieldUsec(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jlong slow) {
+void Java_org_rocksdb_DBOptions_setWriteThreadSlowYieldUsec(
+ JNIEnv*, jobject, jlong jhandle, jlong slow) {
reinterpret_cast<rocksdb::DBOptions*>(jhandle)->write_thread_slow_yield_usec =
static_cast<int64_t>(slow);
}
* Method: writeThreadSlowYieldUsec
* Signature: (J)J
*/
-jlong Java_org_rocksdb_DBOptions_writeThreadSlowYieldUsec(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_DBOptions_writeThreadSlowYieldUsec(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::DBOptions*>(jhandle)
->write_thread_slow_yield_usec;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_DBOptions_setSkipStatsUpdateOnDbOpen(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jskip_stats_update_on_db_open) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jskip_stats_update_on_db_open) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
opt->skip_stats_update_on_db_open =
static_cast<bool>(jskip_stats_update_on_db_open);
* Method: skipStatsUpdateOnDbOpen
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_DBOptions_skipStatsUpdateOnDbOpen(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_skipStatsUpdateOnDbOpen(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
return static_cast<jboolean>(opt->skip_stats_update_on_db_open);
}
* Signature: (JB)V
*/
void Java_org_rocksdb_DBOptions_setWalRecoveryMode(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jbyte jwal_recovery_mode_value) {
+ JNIEnv*, jobject, jlong jhandle, jbyte jwal_recovery_mode_value) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
opt->wal_recovery_mode = rocksdb::WALRecoveryModeJni::toCppWALRecoveryMode(
jwal_recovery_mode_value);
* Method: walRecoveryMode
* Signature: (J)B
*/
-jbyte Java_org_rocksdb_DBOptions_walRecoveryMode(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jbyte Java_org_rocksdb_DBOptions_walRecoveryMode(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
return rocksdb::WALRecoveryModeJni::toJavaWALRecoveryMode(
opt->wal_recovery_mode);
* Method: setAllow2pc
* Signature: (JZ)V
*/
-void Java_org_rocksdb_DBOptions_setAllow2pc(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle,
- jboolean jallow_2pc) {
+void Java_org_rocksdb_DBOptions_setAllow2pc(
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_2pc) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
opt->allow_2pc = static_cast<bool>(jallow_2pc);
}
* Method: allow2pc
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_DBOptions_allow2pc(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_allow2pc(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
return static_cast<jboolean>(opt->allow_2pc);
}
* Method: setRowCache
* Signature: (JJ)V
*/
-void Java_org_rocksdb_DBOptions_setRowCache(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle,
- jlong jrow_cache_handle) {
+void Java_org_rocksdb_DBOptions_setRowCache(
+ JNIEnv*, jobject, jlong jhandle, jlong jrow_cache_handle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
auto* row_cache =
reinterpret_cast<std::shared_ptr<rocksdb::Cache>*>(jrow_cache_handle);
opt->row_cache = *row_cache;
}
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setWalFilter
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setWalFilter(
+ JNIEnv*, jobject, jlong jhandle, jlong jwal_filter_handle) {
+ auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
+ auto* wal_filter =
+ reinterpret_cast<rocksdb::WalFilterJniCallback*>(jwal_filter_handle);
+ opt->wal_filter = wal_filter;
+}
+
/*
* Class: org_rocksdb_DBOptions
* Method: setFailIfOptionsFileError
* Signature: (JZ)V
*/
void Java_org_rocksdb_DBOptions_setFailIfOptionsFileError(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jfail_if_options_file_error) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jfail_if_options_file_error) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
opt->fail_if_options_file_error =
static_cast<bool>(jfail_if_options_file_error);
* Method: failIfOptionsFileError
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_DBOptions_failIfOptionsFileError(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_failIfOptionsFileError(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
return static_cast<jboolean>(opt->fail_if_options_file_error);
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_DBOptions_setDumpMallocStats(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jdump_malloc_stats) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jdump_malloc_stats) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
opt->dump_malloc_stats = static_cast<bool>(jdump_malloc_stats);
}
* Method: dumpMallocStats
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_DBOptions_dumpMallocStats(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_dumpMallocStats(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
return static_cast<jboolean>(opt->dump_malloc_stats);
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_DBOptions_setAvoidFlushDuringRecovery(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean javoid_flush_during_recovery) {
+ JNIEnv*, jobject, jlong jhandle, jboolean javoid_flush_during_recovery) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
opt->avoid_flush_during_recovery =
static_cast<bool>(javoid_flush_during_recovery);
* Method: avoidFlushDuringRecovery
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_DBOptions_avoidFlushDuringRecovery(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_avoidFlushDuringRecovery(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
return static_cast<jboolean>(opt->avoid_flush_during_recovery);
}
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setAllowIngestBehind
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setAllowIngestBehind(
+ JNIEnv*, jobject, jlong jhandle, jboolean jallow_ingest_behind) {
+ auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
+ opt->allow_ingest_behind = jallow_ingest_behind == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: allowIngestBehind
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_allowIngestBehind(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->allow_ingest_behind);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setPreserveDeletes
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setPreserveDeletes(
+ JNIEnv*, jobject, jlong jhandle, jboolean jpreserve_deletes) {
+ auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
+ opt->preserve_deletes = jpreserve_deletes == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: preserveDeletes
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_preserveDeletes(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->preserve_deletes);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setTwoWriteQueues
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setTwoWriteQueues(
+ JNIEnv*, jobject, jlong jhandle, jboolean jtwo_write_queues) {
+ auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
+ opt->two_write_queues = jtwo_write_queues == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: twoWriteQueues
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_twoWriteQueues(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->two_write_queues);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setManualWalFlush
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setManualWalFlush(
+ JNIEnv*, jobject, jlong jhandle, jboolean jmanual_wal_flush) {
+ auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
+ opt->manual_wal_flush = jmanual_wal_flush == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: manualWalFlush
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_manualWalFlush(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->manual_wal_flush);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setAtomicFlush
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setAtomicFlush(
+ JNIEnv*, jobject, jlong jhandle, jboolean jatomic_flush) {
+ auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
+ opt->atomic_flush = jatomic_flush == JNI_TRUE;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: atomicFlush
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_atomicFlush(
+ JNIEnv *, jobject, jlong jhandle) {
+ auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
+ return static_cast<jboolean>(opt->atomic_flush);
+}
+
/*
* Class: org_rocksdb_DBOptions
* Method: setAvoidFlushDuringShutdown
* Signature: (JZ)V
*/
void Java_org_rocksdb_DBOptions_setAvoidFlushDuringShutdown(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean javoid_flush_during_shutdown) {
+ JNIEnv*, jobject, jlong jhandle, jboolean javoid_flush_during_shutdown) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
opt->avoid_flush_during_shutdown =
static_cast<bool>(javoid_flush_during_shutdown);
* Method: avoidFlushDuringShutdown
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_DBOptions_avoidFlushDuringShutdown(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_DBOptions_avoidFlushDuringShutdown(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jhandle);
return static_cast<jboolean>(opt->avoid_flush_during_shutdown);
}
* Method: newWriteOptions
* Signature: ()J
*/
-jlong Java_org_rocksdb_WriteOptions_newWriteOptions(JNIEnv* /*env*/,
- jclass /*jcls*/) {
+jlong Java_org_rocksdb_WriteOptions_newWriteOptions(
+ JNIEnv*, jclass) {
auto* op = new rocksdb::WriteOptions();
return reinterpret_cast<jlong>(op);
}
* Method: copyWriteOptions
* Signature: (J)J
*/
-jlong Java_org_rocksdb_WriteOptions_copyWriteOptions(JNIEnv* /*env*/,
- jclass /*jcls*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_WriteOptions_copyWriteOptions(
+ JNIEnv*, jclass, jlong jhandle) {
auto new_opt = new rocksdb::WriteOptions(
*(reinterpret_cast<rocksdb::WriteOptions*>(jhandle)));
return reinterpret_cast<jlong>(new_opt);
* Method: disposeInternal
* Signature: ()V
*/
-void Java_org_rocksdb_WriteOptions_disposeInternal(JNIEnv* /*env*/,
- jobject /*jwrite_options*/,
- jlong jhandle) {
+void Java_org_rocksdb_WriteOptions_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
auto* write_options = reinterpret_cast<rocksdb::WriteOptions*>(jhandle);
assert(write_options != nullptr);
delete write_options;
* Method: setSync
* Signature: (JZ)V
*/
-void Java_org_rocksdb_WriteOptions_setSync(JNIEnv* /*env*/,
- jobject /*jwrite_options*/,
- jlong jhandle, jboolean jflag) {
+void Java_org_rocksdb_WriteOptions_setSync(
+ JNIEnv*, jobject, jlong jhandle, jboolean jflag) {
reinterpret_cast<rocksdb::WriteOptions*>(jhandle)->sync = jflag;
}
* Method: sync
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_WriteOptions_sync(JNIEnv* /*env*/,
- jobject /*jwrite_options*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_WriteOptions_sync(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::WriteOptions*>(jhandle)->sync;
}
* Method: setDisableWAL
* Signature: (JZ)V
*/
-void Java_org_rocksdb_WriteOptions_setDisableWAL(JNIEnv* /*env*/,
- jobject /*jwrite_options*/,
- jlong jhandle,
- jboolean jflag) {
+void Java_org_rocksdb_WriteOptions_setDisableWAL(
+ JNIEnv*, jobject, jlong jhandle, jboolean jflag) {
reinterpret_cast<rocksdb::WriteOptions*>(jhandle)->disableWAL = jflag;
}
* Method: disableWAL
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_WriteOptions_disableWAL(JNIEnv* /*env*/,
- jobject /*jwrite_options*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_WriteOptions_disableWAL(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::WriteOptions*>(jhandle)->disableWAL;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_WriteOptions_setIgnoreMissingColumnFamilies(
- JNIEnv* /*env*/, jobject /*jwrite_options*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jboolean jignore_missing_column_families) {
reinterpret_cast<rocksdb::WriteOptions*>(jhandle)
->ignore_missing_column_families =
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_WriteOptions_ignoreMissingColumnFamilies(
- JNIEnv* /*env*/, jobject /*jwrite_options*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::WriteOptions*>(jhandle)
->ignore_missing_column_families;
}
* Method: setNoSlowdown
* Signature: (JZ)V
*/
-void Java_org_rocksdb_WriteOptions_setNoSlowdown(JNIEnv* /*env*/,
- jobject /*jwrite_options*/,
- jlong jhandle,
- jboolean jno_slowdown) {
+void Java_org_rocksdb_WriteOptions_setNoSlowdown(
+ JNIEnv*, jobject, jlong jhandle, jboolean jno_slowdown) {
reinterpret_cast<rocksdb::WriteOptions*>(jhandle)->no_slowdown =
static_cast<bool>(jno_slowdown);
}
* Method: noSlowdown
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_WriteOptions_noSlowdown(JNIEnv* /*env*/,
- jobject /*jwrite_options*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_WriteOptions_noSlowdown(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::WriteOptions*>(jhandle)->no_slowdown;
}
+/*
+ * Class: org_rocksdb_WriteOptions
+ * Method: setLowPri
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_WriteOptions_setLowPri(
+    JNIEnv*, jobject, jlong jhandle, jboolean jlow_pri) {
+  // Mirror the Java flag onto the native WriteOptions' low_pri field.
+  auto* write_options = reinterpret_cast<rocksdb::WriteOptions*>(jhandle);
+  write_options->low_pri = static_cast<bool>(jlow_pri);
+}
+
+/*
+ * Class: org_rocksdb_WriteOptions
+ * Method: lowPri
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_WriteOptions_lowPri(
+    JNIEnv*, jobject, jlong jhandle) {
+  // Report the low-priority write flag from the native WriteOptions.
+  const auto* write_options = reinterpret_cast<rocksdb::WriteOptions*>(jhandle);
+  return write_options->low_pri;
+}
+
/////////////////////////////////////////////////////////////////////
// rocksdb::ReadOptions
* Method: newReadOptions
* Signature: ()J
*/
-jlong Java_org_rocksdb_ReadOptions_newReadOptions(JNIEnv* /*env*/,
- jclass /*jcls*/) {
+jlong Java_org_rocksdb_ReadOptions_newReadOptions__(
+ JNIEnv*, jclass) {
auto* read_options = new rocksdb::ReadOptions();
return reinterpret_cast<jlong>(read_options);
}
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: newReadOptions
+ * Signature: (ZZ)J
+ */
+jlong Java_org_rocksdb_ReadOptions_newReadOptions__ZZ(
+    JNIEnv*, jclass, jboolean jverify_checksums, jboolean jfill_cache) {
+  // Allocate a native ReadOptions initialised from the two boolean flags;
+  // ownership of the allocation passes to Java via the returned handle.
+  auto* read_options = new rocksdb::ReadOptions(
+      static_cast<bool>(jverify_checksums), static_cast<bool>(jfill_cache));
+  return reinterpret_cast<jlong>(read_options);
+}
+
/*
* Class: org_rocksdb_ReadOptions
* Method: copyReadOptions
* Signature: (J)J
*/
-jlong Java_org_rocksdb_ReadOptions_copyReadOptions(JNIEnv* /*env*/, jclass /*jcls*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_ReadOptions_copyReadOptions(
+ JNIEnv*, jclass, jlong jhandle) {
auto new_opt = new rocksdb::ReadOptions(
*(reinterpret_cast<rocksdb::ReadOptions*>(jhandle)));
return reinterpret_cast<jlong>(new_opt);
* Method: disposeInternal
* Signature: (J)V
*/
-void Java_org_rocksdb_ReadOptions_disposeInternal(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+void Java_org_rocksdb_ReadOptions_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
auto* read_options = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
assert(read_options != nullptr);
delete read_options;
* Signature: (JZ)V
*/
void Java_org_rocksdb_ReadOptions_setVerifyChecksums(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jverify_checksums) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jverify_checksums) {
reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->verify_checksums =
static_cast<bool>(jverify_checksums);
}
* Method: verifyChecksums
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_ReadOptions_verifyChecksums(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_ReadOptions_verifyChecksums(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->verify_checksums;
}
* Method: setFillCache
* Signature: (JZ)V
*/
-void Java_org_rocksdb_ReadOptions_setFillCache(JNIEnv* /*env*/,
- jobject /*jobj*/, jlong jhandle,
- jboolean jfill_cache) {
+void Java_org_rocksdb_ReadOptions_setFillCache(
+ JNIEnv*, jobject, jlong jhandle, jboolean jfill_cache) {
reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->fill_cache =
static_cast<bool>(jfill_cache);
}
* Method: fillCache
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_ReadOptions_fillCache(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_ReadOptions_fillCache(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->fill_cache;
}
* Method: setTailing
* Signature: (JZ)V
*/
-void Java_org_rocksdb_ReadOptions_setTailing(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle, jboolean jtailing) {
+void Java_org_rocksdb_ReadOptions_setTailing(
+ JNIEnv*, jobject, jlong jhandle, jboolean jtailing) {
reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->tailing =
static_cast<bool>(jtailing);
}
* Method: tailing
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_ReadOptions_tailing(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_ReadOptions_tailing(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->tailing;
}
* Method: managed
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_ReadOptions_managed(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_ReadOptions_managed(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->managed;
}
* Method: setManaged
* Signature: (JZ)V
*/
-void Java_org_rocksdb_ReadOptions_setManaged(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle, jboolean jmanaged) {
+void Java_org_rocksdb_ReadOptions_setManaged(
+ JNIEnv*, jobject, jlong jhandle, jboolean jmanaged) {
reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->managed =
static_cast<bool>(jmanaged);
}
* Method: totalOrderSeek
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_ReadOptions_totalOrderSeek(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_ReadOptions_totalOrderSeek(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->total_order_seek;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_ReadOptions_setTotalOrderSeek(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jtotal_order_seek) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jtotal_order_seek) {
reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->total_order_seek =
static_cast<bool>(jtotal_order_seek);
}
* Method: prefixSameAsStart
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_ReadOptions_prefixSameAsStart(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_ReadOptions_prefixSameAsStart(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->prefix_same_as_start;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_ReadOptions_setPrefixSameAsStart(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jprefix_same_as_start) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jprefix_same_as_start) {
reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->prefix_same_as_start =
static_cast<bool>(jprefix_same_as_start);
}
* Method: pinData
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_ReadOptions_pinData(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_ReadOptions_pinData(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->pin_data;
}
* Method: setPinData
* Signature: (JZ)V
*/
-void Java_org_rocksdb_ReadOptions_setPinData(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle,
- jboolean jpin_data) {
+void Java_org_rocksdb_ReadOptions_setPinData(
+ JNIEnv*, jobject, jlong jhandle, jboolean jpin_data) {
reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->pin_data =
static_cast<bool>(jpin_data);
}
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_ReadOptions_backgroundPurgeOnIteratorCleanup(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
return static_cast<jboolean>(opt->background_purge_on_iterator_cleanup);
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_ReadOptions_setBackgroundPurgeOnIteratorCleanup(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+ JNIEnv*, jobject, jlong jhandle,
jboolean jbackground_purge_on_iterator_cleanup) {
auto* opt = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
opt->background_purge_on_iterator_cleanup =
* Method: readaheadSize
* Signature: (J)J
*/
-jlong Java_org_rocksdb_ReadOptions_readaheadSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_ReadOptions_readaheadSize(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
return static_cast<jlong>(opt->readahead_size);
}
* Method: setReadaheadSize
* Signature: (JJ)V
*/
-void Java_org_rocksdb_ReadOptions_setReadaheadSize(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jlong jreadahead_size) {
+void Java_org_rocksdb_ReadOptions_setReadaheadSize(
+ JNIEnv*, jobject, jlong jhandle, jlong jreadahead_size) {
auto* opt = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
opt->readahead_size = static_cast<size_t>(jreadahead_size);
}
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: maxSkippableInternalKeys
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ReadOptions_maxSkippableInternalKeys(
+    JNIEnv*, jobject, jlong jhandle) {
+  // Read max_skippable_internal_keys (uint64_t) and narrow it to jlong.
+  const auto* read_options = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
+  return static_cast<jlong>(read_options->max_skippable_internal_keys);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setMaxSkippableInternalKeys
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ReadOptions_setMaxSkippableInternalKeys(
+    JNIEnv*, jobject, jlong jhandle, jlong jmax_skippable_internal_keys) {
+  // Store the limit on the native ReadOptions, widening jlong to uint64_t.
+  auto* read_options = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
+  read_options->max_skippable_internal_keys =
+      static_cast<uint64_t>(jmax_skippable_internal_keys);
+}
+
/*
* Class: org_rocksdb_ReadOptions
* Method: ignoreRangeDeletions
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_ReadOptions_ignoreRangeDeletions(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_ReadOptions_ignoreRangeDeletions(
+ JNIEnv*, jobject, jlong jhandle) {
auto* opt = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
return static_cast<jboolean>(opt->ignore_range_deletions);
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_ReadOptions_setIgnoreRangeDeletions(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean jignore_range_deletions) {
+ JNIEnv*, jobject, jlong jhandle, jboolean jignore_range_deletions) {
auto* opt = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
opt->ignore_range_deletions = static_cast<bool>(jignore_range_deletions);
}
* Method: setSnapshot
* Signature: (JJ)V
*/
-void Java_org_rocksdb_ReadOptions_setSnapshot(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle, jlong jsnapshot) {
+void Java_org_rocksdb_ReadOptions_setSnapshot(
+ JNIEnv*, jobject, jlong jhandle, jlong jsnapshot) {
reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->snapshot =
reinterpret_cast<rocksdb::Snapshot*>(jsnapshot);
}
* Method: snapshot
* Signature: (J)J
*/
-jlong Java_org_rocksdb_ReadOptions_snapshot(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_ReadOptions_snapshot(
+ JNIEnv*, jobject, jlong jhandle) {
auto& snapshot = reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->snapshot;
return reinterpret_cast<jlong>(snapshot);
}
* Method: readTier
* Signature: (J)B
*/
-jbyte Java_org_rocksdb_ReadOptions_readTier(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jbyte Java_org_rocksdb_ReadOptions_readTier(
+ JNIEnv*, jobject, jlong jhandle) {
return static_cast<jbyte>(
reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->read_tier);
}
* Method: setReadTier
* Signature: (JB)V
*/
-void Java_org_rocksdb_ReadOptions_setReadTier(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle, jbyte jread_tier) {
+void Java_org_rocksdb_ReadOptions_setReadTier(
+ JNIEnv*, jobject, jlong jhandle, jbyte jread_tier) {
reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->read_tier =
static_cast<rocksdb::ReadTier>(jread_tier);
}
* Signature: (JJ)I
*/
void Java_org_rocksdb_ReadOptions_setIterateUpperBound(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jupper_bound_slice_handle) {
+ JNIEnv*, jobject, jlong jhandle, jlong jupper_bound_slice_handle) {
reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->iterate_upper_bound =
reinterpret_cast<rocksdb::Slice*>(jupper_bound_slice_handle);
}
* Method: iterateUpperBound
* Signature: (J)J
*/
-jlong Java_org_rocksdb_ReadOptions_iterateUpperBound(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_ReadOptions_iterateUpperBound(
+ JNIEnv*, jobject, jlong jhandle) {
auto& upper_bound_slice_handle =
reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->iterate_upper_bound;
return reinterpret_cast<jlong>(upper_bound_slice_handle);
* Signature: (JJ)I
*/
void Java_org_rocksdb_ReadOptions_setIterateLowerBound(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jlower_bound_slice_handle) {
+ JNIEnv*, jobject, jlong jhandle, jlong jlower_bound_slice_handle) {
reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->iterate_lower_bound =
reinterpret_cast<rocksdb::Slice*>(jlower_bound_slice_handle);
}
* Method: iterateLowerBound
* Signature: (J)J
*/
-jlong Java_org_rocksdb_ReadOptions_iterateLowerBound(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jlong Java_org_rocksdb_ReadOptions_iterateLowerBound(
+ JNIEnv*, jobject, jlong jhandle) {
auto& lower_bound_slice_handle =
reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->iterate_lower_bound;
return reinterpret_cast<jlong>(lower_bound_slice_handle);
}
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setTableFilter
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ReadOptions_setTableFilter(
+    JNIEnv*, jobject, jlong jhandle, jlong jjni_table_filter_handle) {
+  // Install the table-filter callback on the native ReadOptions.
+  // NOTE(review): the JNI callback object is presumably kept alive by the
+  // Java side for as long as these ReadOptions are used — confirm in caller.
+  auto* read_options = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
+  auto* table_filter = reinterpret_cast<rocksdb::TableFilterJniCallback*>(
+      jjni_table_filter_handle);
+  read_options->table_filter = table_filter->GetTableFilterFunction();
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: setIterStartSeqnum
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ReadOptions_setIterStartSeqnum(
+    JNIEnv*, jobject, jlong jhandle, jlong jiter_start_seqnum) {
+  // Store iter_start_seqnum on the native ReadOptions (jlong -> uint64_t).
+  auto* read_options = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
+  read_options->iter_start_seqnum =
+      static_cast<uint64_t>(jiter_start_seqnum);
+}
+
+/*
+ * Class: org_rocksdb_ReadOptions
+ * Method: iterStartSeqnum
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ReadOptions_iterStartSeqnum(
+    JNIEnv*, jobject, jlong jhandle) {
+  // Read iter_start_seqnum (uint64_t) and narrow it to jlong for Java.
+  const auto* read_options = reinterpret_cast<rocksdb::ReadOptions*>(jhandle);
+  return static_cast<jlong>(read_options->iter_start_seqnum);
+}
+
/////////////////////////////////////////////////////////////////////
// rocksdb::ComparatorOptions
* Method: newComparatorOptions
* Signature: ()J
*/
-jlong Java_org_rocksdb_ComparatorOptions_newComparatorOptions(JNIEnv* /*env*/,
- jclass /*jcls*/) {
+jlong Java_org_rocksdb_ComparatorOptions_newComparatorOptions(
+ JNIEnv*, jclass) {
auto* comparator_opt = new rocksdb::ComparatorJniCallbackOptions();
return reinterpret_cast<jlong>(comparator_opt);
}
* Method: useAdaptiveMutex
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_ComparatorOptions_useAdaptiveMutex(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_ComparatorOptions_useAdaptiveMutex(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::ComparatorJniCallbackOptions*>(jhandle)
->use_adaptive_mutex;
}
* Signature: (JZ)V
*/
void Java_org_rocksdb_ComparatorOptions_setUseAdaptiveMutex(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jboolean juse_adaptive_mutex) {
+ JNIEnv*, jobject, jlong jhandle, jboolean juse_adaptive_mutex) {
reinterpret_cast<rocksdb::ComparatorJniCallbackOptions*>(jhandle)
->use_adaptive_mutex = static_cast<bool>(juse_adaptive_mutex);
}
* Method: disposeInternal
* Signature: (J)V
*/
-void Java_org_rocksdb_ComparatorOptions_disposeInternal(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+void Java_org_rocksdb_ComparatorOptions_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
auto* comparator_opt =
reinterpret_cast<rocksdb::ComparatorJniCallbackOptions*>(jhandle);
assert(comparator_opt != nullptr);
* Method: newFlushOptions
* Signature: ()J
*/
-jlong Java_org_rocksdb_FlushOptions_newFlushOptions(JNIEnv* /*env*/,
- jclass /*jcls*/) {
+jlong Java_org_rocksdb_FlushOptions_newFlushOptions(
+ JNIEnv*, jclass) {
auto* flush_opt = new rocksdb::FlushOptions();
return reinterpret_cast<jlong>(flush_opt);
}
* Method: setWaitForFlush
* Signature: (JZ)V
*/
-void Java_org_rocksdb_FlushOptions_setWaitForFlush(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jboolean jwait) {
+void Java_org_rocksdb_FlushOptions_setWaitForFlush(
+ JNIEnv*, jobject, jlong jhandle, jboolean jwait) {
reinterpret_cast<rocksdb::FlushOptions*>(jhandle)->wait =
static_cast<bool>(jwait);
}
* Method: waitForFlush
* Signature: (J)Z
*/
-jboolean Java_org_rocksdb_FlushOptions_waitForFlush(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+jboolean Java_org_rocksdb_FlushOptions_waitForFlush(
+ JNIEnv*, jobject, jlong jhandle) {
return reinterpret_cast<rocksdb::FlushOptions*>(jhandle)->wait;
}
+/*
+ * Class: org_rocksdb_FlushOptions
+ * Method: setAllowWriteStall
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_FlushOptions_setAllowWriteStall(
+    JNIEnv*, jobject, jlong jhandle, jboolean jallow_write_stall) {
+  // Mirror the Java flag onto the native FlushOptions.
+  reinterpret_cast<rocksdb::FlushOptions*>(jhandle)->allow_write_stall =
+      (jallow_write_stall == JNI_TRUE);
+}
+
+/*
+ * Class: org_rocksdb_FlushOptions
+ * Method: allowWriteStall
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_FlushOptions_allowWriteStall(
+    JNIEnv*, jobject, jlong jhandle) {
+  // Report whether this flush is permitted to stall incoming writes.
+  return static_cast<jboolean>(
+      reinterpret_cast<rocksdb::FlushOptions*>(jhandle)->allow_write_stall);
+}
+
/*
* Class: org_rocksdb_FlushOptions
* Method: disposeInternal
* Signature: (J)V
*/
-void Java_org_rocksdb_FlushOptions_disposeInternal(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+void Java_org_rocksdb_FlushOptions_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
auto* flush_opt = reinterpret_cast<rocksdb::FlushOptions*>(jhandle);
assert(flush_opt != nullptr);
delete flush_opt;
// calling C++ rocksdb::OptionsUtil methods from Java side.
#include <jni.h>
+#include <string>
#include "include/org_rocksdb_OptionsUtil.h"
void Java_org_rocksdb_OptionsUtil_loadLatestOptions(
JNIEnv* env, jclass /*jcls*/, jstring jdbpath, jlong jenv_handle,
jlong jdb_opts_handle, jobject jcfds, jboolean ignore_unknown_options) {
- const char* db_path = env->GetStringUTFChars(jdbpath, nullptr);
+ jboolean has_exception = JNI_FALSE;
+ auto db_path = rocksdb::JniUtil::copyStdString(env, jdbpath, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return;
+ }
std::vector<rocksdb::ColumnFamilyDescriptor> cf_descs;
rocksdb::Status s = rocksdb::LoadLatestOptions(
db_path, reinterpret_cast<rocksdb::Env*>(jenv_handle),
reinterpret_cast<rocksdb::DBOptions*>(jdb_opts_handle), &cf_descs,
ignore_unknown_options);
- env->ReleaseStringUTFChars(jdbpath, db_path);
-
if (!s.ok()) {
+ // error, raise an exception
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ } else {
+ build_column_family_descriptor_list(env, jcfds, cf_descs);
}
-
- build_column_family_descriptor_list(env, jcfds, cf_descs);
}
/*
void Java_org_rocksdb_OptionsUtil_loadOptionsFromFile(
JNIEnv* env, jclass /*jcls*/, jstring jopts_file_name, jlong jenv_handle,
jlong jdb_opts_handle, jobject jcfds, jboolean ignore_unknown_options) {
- const char* opts_file_name = env->GetStringUTFChars(jopts_file_name, nullptr);
+ jboolean has_exception = JNI_FALSE;
+ auto opts_file_name = rocksdb::JniUtil::copyStdString(env, jopts_file_name, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return;
+ }
std::vector<rocksdb::ColumnFamilyDescriptor> cf_descs;
rocksdb::Status s = rocksdb::LoadOptionsFromFile(
opts_file_name, reinterpret_cast<rocksdb::Env*>(jenv_handle),
reinterpret_cast<rocksdb::DBOptions*>(jdb_opts_handle), &cf_descs,
ignore_unknown_options);
- env->ReleaseStringUTFChars(jopts_file_name, opts_file_name);
-
if (!s.ok()) {
+ // error, raise an exception
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ } else {
+ build_column_family_descriptor_list(env, jcfds, cf_descs);
}
-
- build_column_family_descriptor_list(env, jcfds, cf_descs);
}
/*
*/
jstring Java_org_rocksdb_OptionsUtil_getLatestOptionsFileName(
JNIEnv* env, jclass /*jcls*/, jstring jdbpath, jlong jenv_handle) {
- const char* db_path = env->GetStringUTFChars(jdbpath, nullptr);
+ jboolean has_exception = JNI_FALSE;
+ auto db_path = rocksdb::JniUtil::copyStdString(env, jdbpath, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return nullptr;
+ }
std::string options_file_name;
- if (db_path != nullptr) {
- rocksdb::GetLatestOptionsFileName(
- db_path, reinterpret_cast<rocksdb::Env*>(jenv_handle),
- &options_file_name);
+ rocksdb::Status s = rocksdb::GetLatestOptionsFileName(
+ db_path, reinterpret_cast<rocksdb::Env*>(jenv_handle),
+ &options_file_name);
+ if (!s.ok()) {
+ // error, raise an exception
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ } else {
+ return env->NewStringUTF(options_file_name.c_str());
}
- env->ReleaseStringUTFChars(jdbpath, db_path);
-
- return env->NewStringUTF(options_file_name.c_str());
}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// rocksdb::PersistentCache.
+
+#include <jni.h>
+#include <string>
+
+#include "include/org_rocksdb_PersistentCache.h"
+#include "rocksdb/persistent_cache.h"
+#include "loggerjnicallback.h"
+#include "portal.h"
+
+/*
+ * Class: org_rocksdb_PersistentCache
+ * Method: newPersistentCache
+ * Signature: (JLjava/lang/String;JJZ)J
+ */
+jlong Java_org_rocksdb_PersistentCache_newPersistentCache(
+    JNIEnv* env, jclass, jlong jenv_handle, jstring jpath,
+    jlong jsz, jlong jlogger_handle, jboolean joptimized_for_nvm) {
+  auto* rocks_env = reinterpret_cast<rocksdb::Env*>(jenv_handle);
+  jboolean has_exception = JNI_FALSE;
+  std::string path =
+      rocksdb::JniUtil::copyStdString(env, jpath, &has_exception);
+  if (has_exception == JNI_TRUE) {
+    // a Java exception is already pending from the string copy
+    return 0;
+  }
+  auto* logger =
+      reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback>*>(
+          jlogger_handle);
+  // Heap-allocated shared_ptr; on success its address becomes the Java handle.
+  auto* cache = new std::shared_ptr<rocksdb::PersistentCache>(nullptr);
+  rocksdb::Status s = rocksdb::NewPersistentCache(
+      rocks_env, path, static_cast<uint64_t>(jsz), *logger,
+      static_cast<bool>(joptimized_for_nvm), cache);
+  if (!s.ok()) {
+    // Throw and free the handle: the Java caller never stores the return
+    // value once an exception is pending, so returning it would leak.
+    delete cache;
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+    return 0;
+  }
+  return reinterpret_cast<jlong>(cache);
+}
+
+/*
+ * Class: org_rocksdb_PersistentCache
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_PersistentCache_disposeInternal(
+    JNIEnv*, jobject, jlong jhandle) {
+  // Destroy the heap-allocated shared_ptr behind the Java handle, releasing
+  // this binding's reference to the PersistentCache.
+  auto* cache_ptr =
+      reinterpret_cast<std::shared_ptr<rocksdb::PersistentCache>*>(jhandle);
+  delete cache_ptr;
+}
#ifndef JAVA_ROCKSJNI_PORTAL_H_
#define JAVA_ROCKSJNI_PORTAL_H_
+#include <algorithm>
#include <cstring>
-#include <jni.h>
#include <functional>
#include <iostream>
#include <iterator>
+#include <jni.h>
#include <limits>
#include <memory>
#include <string>
#include "rocksdb/filter_policy.h"
#include "rocksdb/rate_limiter.h"
#include "rocksdb/status.h"
+#include "rocksdb/table.h"
#include "rocksdb/utilities/backupable_db.h"
#include "rocksdb/utilities/memory_util.h"
#include "rocksdb/utilities/transaction_db.h"
#include "rocksjni/compaction_filter_factory_jnicallback.h"
#include "rocksjni/comparatorjnicallback.h"
#include "rocksjni/loggerjnicallback.h"
+#include "rocksjni/table_filter_jnicallback.h"
+#include "rocksjni/trace_writer_jnicallback.h"
#include "rocksjni/transaction_notifier_jnicallback.h"
+#include "rocksjni/wal_filter_jnicallback.h"
#include "rocksjni/writebatchhandlerjnicallback.h"
// Remove macro on windows
namespace rocksdb {
-// Detect if jlong overflows size_t
-inline Status check_if_jlong_fits_size_t(const jlong& jvalue) {
- Status s = Status::OK();
- if (static_cast<uint64_t>(jvalue) > std::numeric_limits<size_t>::max()) {
- s = Status::InvalidArgument(Slice("jlong overflows 32 bit value."));
- }
- return s;
-}
-
class JavaClass {
public:
/**
}
};
-// The portal class for org.rocksdb.RocksDB
-class RocksDBJni : public RocksDBNativeClass<rocksdb::DB*, RocksDBJni> {
+// The portal class for java.lang.IllegalArgumentException
+class IllegalArgumentExceptionJni :
+ public JavaException<IllegalArgumentExceptionJni> {
public:
/**
- * Get the Java Class org.rocksdb.RocksDB
+ * Get the Java Class java.lang.IllegalArgumentException
*
* @param env A pointer to the Java environment
*
* OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env, "org/rocksdb/RocksDB");
+ return JavaException::getJClass(env, "java/lang/IllegalArgumentException");
+ }
+
+ /**
+ * Create and throw a Java IllegalArgumentException with the provided status
+ *
+ * If s.ok() == true, then this function will not throw any exception.
+ *
+ * @param env A pointer to the Java environment
+ * @param s The status for the exception
+ *
+ * @return true if an exception was thrown, false otherwise
+ */
+ static bool ThrowNew(JNIEnv* env, const Status& s) {
+ assert(!s.ok());
+ if (s.ok()) {
+ return false;
+ }
+
+ // get the IllegalArgumentException class
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ std::cerr << "IllegalArgumentExceptionJni::ThrowNew/class - Error: unexpected exception!" << std::endl;
+ return env->ExceptionCheck();
+ }
+
+ return JavaException::ThrowNew(env, s.ToString());
}
};
}
}
+  /**
+   * Builds the rocksdb::Status equivalent of Java code/sub-code byte values.
+   *
+   * @param jcode_value the org.rocksdb.Status.Code byte value
+   * @param jsub_code_value the org.rocksdb.Status.SubCode byte value
+   *
+   * @return the matching rocksdb::Status, or nullptr when the code byte is
+   *     not recognised (including 0x7F).
+   */
+  static std::unique_ptr<rocksdb::Status> toCppStatus(
+      const jbyte jcode_value, const jbyte jsub_code_value) {
+    // Heap-wrap a Status value for return.
+    auto wrap = [](const rocksdb::Status& s) {
+      return std::unique_ptr<rocksdb::Status>(new rocksdb::Status(s));
+    };
+    // Resolve the sub-code lazily so codes that ignore it (Ok, unknown)
+    // never invoke the mapper — matching the original per-case calls.
+    auto sub = [jsub_code_value]() {
+      return rocksdb::SubCodeJni::toCppSubCode(jsub_code_value);
+    };
+    switch (jcode_value) {
+      case 0x0:  // Ok
+        return wrap(rocksdb::Status::OK());
+      case 0x1:  // NotFound
+        return wrap(rocksdb::Status::NotFound(sub()));
+      case 0x2:  // Corruption
+        return wrap(rocksdb::Status::Corruption(sub()));
+      case 0x3:  // NotSupported
+        return wrap(rocksdb::Status::NotSupported(sub()));
+      case 0x4:  // InvalidArgument
+        return wrap(rocksdb::Status::InvalidArgument(sub()));
+      case 0x5:  // IOError
+        return wrap(rocksdb::Status::IOError(sub()));
+      case 0x6:  // MergeInProgress
+        return wrap(rocksdb::Status::MergeInProgress(sub()));
+      case 0x7:  // Incomplete
+        return wrap(rocksdb::Status::Incomplete(sub()));
+      case 0x8:  // ShutdownInProgress
+        return wrap(rocksdb::Status::ShutdownInProgress(sub()));
+      case 0x9:  // TimedOut
+        return wrap(rocksdb::Status::TimedOut(sub()));
+      case 0xA:  // Aborted
+        return wrap(rocksdb::Status::Aborted(sub()));
+      case 0xB:  // Busy
+        return wrap(rocksdb::Status::Busy(sub()));
+      case 0xC:  // Expired
+        return wrap(rocksdb::Status::Expired(sub()));
+      case 0xD:  // TryAgain
+        return wrap(rocksdb::Status::TryAgain(sub()));
+      case 0x7F:  // Undefined
+      default:
+        return nullptr;
+    }
+  }
+
// Returns the equivalent rocksdb::Status for the Java org.rocksdb.Status
static std::unique_ptr<rocksdb::Status> toCppStatus(JNIEnv* env, const jobject jstatus) {
jmethodID mid_code = getCodeMethod(env);
return nullptr;
}
- jbyte jsubCode_value = 0x0; // None
+ jbyte jsub_code_value = 0x0; // None
if (jsubCode != nullptr) {
jmethodID mid_subCode_value = rocksdb::SubCodeJni::getValueMethod(env);
if (mid_subCode_value == nullptr) {
// exception occurred
return nullptr;
}
- jsubCode_value =env->CallByteMethod(jsubCode, mid_subCode_value);
+ jsub_code_value = env->CallByteMethod(jsubCode, mid_subCode_value);
if (env->ExceptionCheck()) {
// exception occurred
if (jcode != nullptr) {
return nullptr;
}
- std::unique_ptr<rocksdb::Status> status;
- switch (jcode_value) {
- case 0x0:
- //Ok
- status = std::unique_ptr<rocksdb::Status>(new rocksdb::Status(rocksdb::Status::OK()));
- break;
- case 0x1:
- //NotFound
- status = std::unique_ptr<rocksdb::Status>(new rocksdb::Status(rocksdb::Status::NotFound(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value))));
- break;
- case 0x2:
- //Corruption
- status = std::unique_ptr<rocksdb::Status>(new rocksdb::Status(rocksdb::Status::Corruption(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value))));
- break;
- case 0x3:
- //NotSupported
- status = std::unique_ptr<rocksdb::Status>(new rocksdb::Status(rocksdb::Status::NotSupported(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value))));
- break;
- case 0x4:
- //InvalidArgument
- status = std::unique_ptr<rocksdb::Status>(new rocksdb::Status(rocksdb::Status::InvalidArgument(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value))));
- break;
- case 0x5:
- //IOError
- status = std::unique_ptr<rocksdb::Status>(new rocksdb::Status(rocksdb::Status::IOError(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value))));
- break;
- case 0x6:
- //MergeInProgress
- status = std::unique_ptr<rocksdb::Status>(new rocksdb::Status(rocksdb::Status::MergeInProgress(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value))));
- break;
- case 0x7:
- //Incomplete
- status = std::unique_ptr<rocksdb::Status>(new rocksdb::Status(rocksdb::Status::Incomplete(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value))));
- break;
- case 0x8:
- //ShutdownInProgress
- status = std::unique_ptr<rocksdb::Status>(new rocksdb::Status(rocksdb::Status::ShutdownInProgress(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value))));
- break;
- case 0x9:
- //TimedOut
- status = std::unique_ptr<rocksdb::Status>(new rocksdb::Status(rocksdb::Status::TimedOut(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value))));
- break;
- case 0xA:
- //Aborted
- status = std::unique_ptr<rocksdb::Status>(new rocksdb::Status(rocksdb::Status::Aborted(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value))));
- break;
- case 0xB:
- //Busy
- status = std::unique_ptr<rocksdb::Status>(new rocksdb::Status(rocksdb::Status::Busy(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value))));
- break;
- case 0xC:
- //Expired
- status = std::unique_ptr<rocksdb::Status>(new rocksdb::Status(rocksdb::Status::Expired(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value))));
- break;
- case 0xD:
- //TryAgain
- status = std::unique_ptr<rocksdb::Status>(new rocksdb::Status(rocksdb::Status::TryAgain(rocksdb::SubCodeJni::toCppSubCode(jsubCode_value))));
- break;
- case 0x7F:
- default:
- return nullptr;
- }
+ std::unique_ptr<rocksdb::Status> status =
+ toCppStatus(jcode_value, jsub_code_value);
// delete all local refs
if (jstate != nullptr) {
* @return true if an exception was thrown, false otherwise
*/
static bool ThrowNew(JNIEnv* env, const Status& s) {
- assert(!s.ok());
if (s.ok()) {
return false;
}
}
};
-// The portal class for java.lang.IllegalArgumentException
-class IllegalArgumentExceptionJni :
- public JavaException<IllegalArgumentExceptionJni> {
+// The portal class for java.util.List
+class ListJni : public JavaClass {
public:
/**
- * Get the Java Class java.lang.IllegalArgumentException
+ * Get the Java Class java.util.List
*
* @param env A pointer to the Java environment
*
* ClassFormatError, ClassCircularityError, NoClassDefFoundError,
* OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
- static jclass getJClass(JNIEnv* env) {
- return JavaException::getJClass(env, "java/lang/IllegalArgumentException");
+ static jclass getListClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "java/util/List");
}
/**
- * Create and throw a Java IllegalArgumentException with the provided status
- *
- * If s.ok() == true, then this function will not throw any exception.
+ * Get the Java Class java.util.ArrayList
*
* @param env A pointer to the Java environment
- * @param s The status for the exception
*
- * @return true if an exception was thrown, false otherwise
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
- static bool ThrowNew(JNIEnv* env, const Status& s) {
- assert(!s.ok());
- if (s.ok()) {
- return false;
- }
-
- // get the IllegalArgumentException class
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
- // exception occurred accessing class
- std::cerr << "IllegalArgumentExceptionJni::ThrowNew/class - Error: unexpected exception!" << std::endl;
- return env->ExceptionCheck();
- }
-
- return JavaException::ThrowNew(env, s.ToString());
+ static jclass getArrayListClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "java/util/ArrayList");
}
-};
-
-// The portal class for org.rocksdb.Options
-class OptionsJni : public RocksDBNativeClass<
- rocksdb::Options*, OptionsJni> {
- public:
/**
- * Get the Java Class org.rocksdb.Options
+ * Get the Java Class java.util.Iterator
*
* @param env A pointer to the Java environment
*
* ClassFormatError, ClassCircularityError, NoClassDefFoundError,
* OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
- static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env, "org/rocksdb/Options");
+ static jclass getIteratorClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "java/util/Iterator");
}
-};
-// The portal class for org.rocksdb.DBOptions
-class DBOptionsJni : public RocksDBNativeClass<
- rocksdb::DBOptions*, DBOptionsJni> {
- public:
/**
- * Get the Java Class org.rocksdb.DBOptions
+ * Get the Java Method: List#iterator
*
* @param env A pointer to the Java environment
*
- * @return The Java Class or nullptr if one of the
- * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
- * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ * @return The Java Method ID or nullptr if the class or method id could not
+   * be retrieved
*/
- static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env, "org/rocksdb/DBOptions");
+ static jmethodID getIteratorMethod(JNIEnv* env) {
+ jclass jlist_clazz = getListClass(env);
+ if(jlist_clazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetMethodID(jlist_clazz, "iterator", "()Ljava/util/Iterator;");
+ assert(mid != nullptr);
+ return mid;
}
-};
-// The portal class for org.rocksdb.ColumnFamilyOptions
-class ColumnFamilyOptionsJni
- : public RocksDBNativeClass<rocksdb::ColumnFamilyOptions*,
- ColumnFamilyOptionsJni> {
- public:
/**
- * Get the Java Class org.rocksdb.ColumnFamilyOptions
+ * Get the Java Method: Iterator#hasNext
*
* @param env A pointer to the Java environment
*
- * @return The Java Class or nullptr if one of the
- * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
- * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ * @return The Java Method ID or nullptr if the class or method id could not
+   * be retrieved
*/
- static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env,
- "org/rocksdb/ColumnFamilyOptions");
+ static jmethodID getHasNextMethod(JNIEnv* env) {
+ jclass jiterator_clazz = getIteratorClass(env);
+ if(jiterator_clazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jiterator_clazz, "hasNext", "()Z");
+ assert(mid != nullptr);
+ return mid;
}
/**
- * Create a new Java org.rocksdb.ColumnFamilyOptions object with the same
- * properties as the provided C++ rocksdb::ColumnFamilyOptions object
+ * Get the Java Method: Iterator#next
*
* @param env A pointer to the Java environment
- * @param cfoptions A pointer to rocksdb::ColumnFamilyOptions object
*
- * @return A reference to a Java org.rocksdb.ColumnFamilyOptions object, or
- * nullptr if an an exception occurs
+ * @return The Java Method ID or nullptr if the class or method id could not
+   * be retrieved
*/
- static jobject construct(JNIEnv* env, const ColumnFamilyOptions* cfoptions) {
- auto* cfo = new rocksdb::ColumnFamilyOptions(*cfoptions);
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
+ static jmethodID getNextMethod(JNIEnv* env) {
+ jclass jiterator_clazz = getIteratorClass(env);
+ if(jiterator_clazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- jmethodID mid = env->GetMethodID(jclazz, "<init>", "(J)V");
- if (mid == nullptr) {
- // exception thrown: NoSuchMethodException or OutOfMemoryError
+ static jmethodID mid =
+ env->GetMethodID(jiterator_clazz, "next", "()Ljava/lang/Object;");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: ArrayList constructor
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+   * be retrieved
+ */
+ static jmethodID getArrayListConstructorMethodId(JNIEnv* env) {
+ jclass jarray_list_clazz = getArrayListClass(env);
+ if(jarray_list_clazz == nullptr) {
+ // exception occurred accessing class
return nullptr;
}
+ static jmethodID mid =
+ env->GetMethodID(jarray_list_clazz, "<init>", "(I)V");
+ assert(mid != nullptr);
+ return mid;
+ }
- jobject jcfd = env->NewObject(jclazz, mid, reinterpret_cast<jlong>(cfo));
- if (env->ExceptionCheck()) {
+ /**
+ * Get the Java Method: List#add
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+   * be retrieved
+ */
+ static jmethodID getListAddMethodId(JNIEnv* env) {
+ jclass jlist_clazz = getListClass(env);
+ if(jlist_clazz == nullptr) {
+ // exception occurred accessing class
return nullptr;
}
- return jcfd;
+ static jmethodID mid =
+ env->GetMethodID(jlist_clazz, "add", "(Ljava/lang/Object;)Z");
+ assert(mid != nullptr);
+ return mid;
}
};
-// The portal class for org.rocksdb.WriteOptions
-class WriteOptionsJni : public RocksDBNativeClass<
- rocksdb::WriteOptions*, WriteOptionsJni> {
+// The portal class for java.lang.Byte
+class ByteJni : public JavaClass {
public:
/**
- * Get the Java Class org.rocksdb.WriteOptions
+ * Get the Java Class java.lang.Byte
*
* @param env A pointer to the Java environment
*
* OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env, "org/rocksdb/WriteOptions");
+ return JavaClass::getJClass(env, "java/lang/Byte");
}
-};
-// The portal class for org.rocksdb.ReadOptions
-class ReadOptionsJni : public RocksDBNativeClass<
- rocksdb::ReadOptions*, ReadOptionsJni> {
- public:
/**
- * Get the Java Class org.rocksdb.ReadOptions
+ * Get the Java Class byte[]
*
* @param env A pointer to the Java environment
*
* ClassFormatError, ClassCircularityError, NoClassDefFoundError,
* OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
- static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env, "org/rocksdb/ReadOptions");
+ static jclass getArrayJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "[B");
}
-};
-// The portal class for org.rocksdb.WriteBatch
-class WriteBatchJni : public RocksDBNativeClass<
- rocksdb::WriteBatch*, WriteBatchJni> {
- public:
/**
- * Get the Java Class org.rocksdb.WriteBatch
+ * Creates a new 2-dimensional Java Byte Array byte[][]
*
* @param env A pointer to the Java environment
+ * @param len The size of the first dimension
*
- * @return The Java Class or nullptr if one of the
- * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
- * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ * @return A reference to the Java byte[][] or nullptr if an exception occurs
*/
- static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env, "org/rocksdb/WriteBatch");
+ static jobjectArray new2dByteArray(JNIEnv* env, const jsize len) {
+ jclass clazz = getArrayJClass(env);
+ if(clazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ return env->NewObjectArray(len, clazz, nullptr);
}
/**
- * Create a new Java org.rocksdb.WriteBatch object
+ * Get the Java Method: Byte#byteValue
*
* @param env A pointer to the Java environment
- * @param wb A pointer to rocksdb::WriteBatch object
*
- * @return A reference to a Java org.rocksdb.WriteBatch object, or
- * nullptr if an an exception occurs
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
*/
- static jobject construct(JNIEnv* env, const WriteBatch* wb) {
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
+ static jmethodID getByteValueMethod(JNIEnv* env) {
+ jclass clazz = getJClass(env);
+ if(clazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- jmethodID mid = env->GetMethodID(jclazz, "<init>", "(J)V");
+ static jmethodID mid = env->GetMethodID(clazz, "byteValue", "()B");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Calls the Java Method: Byte#valueOf, returning a constructed Byte jobject
+ *
+ * @param env A pointer to the Java environment
+ *
+   * @return A constructed Byte object or nullptr if the class or method id could not
+ * be retrieved, or an exception occurred
+ */
+ static jobject valueOf(JNIEnv* env, jbyte jprimitive_byte) {
+ jclass clazz = getJClass(env);
+ if (clazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetStaticMethodID(clazz, "valueOf", "(B)Ljava/lang/Byte;");
if (mid == nullptr) {
// exception thrown: NoSuchMethodException or OutOfMemoryError
return nullptr;
}
- jobject jwb = env->NewObject(jclazz, mid, reinterpret_cast<jlong>(wb));
+ const jobject jbyte_obj =
+ env->CallStaticObjectMethod(clazz, mid, jprimitive_byte);
if (env->ExceptionCheck()) {
+ // exception occurred
return nullptr;
}
- return jwb;
+ return jbyte_obj;
}
+
};
-// The portal class for org.rocksdb.WriteBatch.Handler
-class WriteBatchHandlerJni : public RocksDBNativeClass<
- const rocksdb::WriteBatchHandlerJniCallback*,
- WriteBatchHandlerJni> {
+// The portal class for java.lang.Integer
+class IntegerJni : public JavaClass {
public:
/**
- * Get the Java Class org.rocksdb.WriteBatch.Handler
+ * Get the Java Class java.lang.Integer
*
* @param env A pointer to the Java environment
*
* OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env,
- "org/rocksdb/WriteBatch$Handler");
+ return JavaClass::getJClass(env, "java/lang/Integer");
}
- /**
- * Get the Java Method: WriteBatch.Handler#put
- *
- * @param env A pointer to the Java environment
- *
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
- */
- static jmethodID getPutCfMethodId(JNIEnv* env) {
+ static jobject valueOf(JNIEnv* env, jint jprimitive_int) {
jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
+ if (jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- static jmethodID mid = env->GetMethodID(jclazz, "put", "(I[B[B)V");
- assert(mid != nullptr);
- return mid;
- }
+ jmethodID mid =
+ env->GetStaticMethodID(jclazz, "valueOf", "(I)Ljava/lang/Integer;");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
- /**
- * Get the Java Method: WriteBatch.Handler#put
- *
- * @param env A pointer to the Java environment
- *
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
- */
- static jmethodID getPutMethodId(JNIEnv* env) {
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
- // exception occurred accessing class
+ const jobject jinteger_obj =
+ env->CallStaticObjectMethod(jclazz, mid, jprimitive_int);
+ if (env->ExceptionCheck()) {
+ // exception occurred
return nullptr;
}
- static jmethodID mid = env->GetMethodID(jclazz, "put", "([B[B)V");
- assert(mid != nullptr);
- return mid;
+ return jinteger_obj;
}
+};
+// The portal class for java.lang.Long
+class LongJni : public JavaClass {
+ public:
/**
- * Get the Java Method: WriteBatch.Handler#merge
+ * Get the Java Class java.lang.Long
*
* @param env A pointer to the Java environment
*
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
- static jmethodID getMergeCfMethodId(JNIEnv* env) {
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "java/lang/Long");
+ }
+
+ static jobject valueOf(JNIEnv* env, jlong jprimitive_long) {
jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
+ if (jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- static jmethodID mid = env->GetMethodID(jclazz, "merge", "(I[B[B)V");
- assert(mid != nullptr);
- return mid;
- }
+ jmethodID mid =
+ env->GetStaticMethodID(jclazz, "valueOf", "(J)Ljava/lang/Long;");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
- /**
- * Get the Java Method: WriteBatch.Handler#merge
- *
- * @param env A pointer to the Java environment
- *
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
- */
- static jmethodID getMergeMethodId(JNIEnv* env) {
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
- // exception occurred accessing class
+ const jobject jlong_obj =
+ env->CallStaticObjectMethod(jclazz, mid, jprimitive_long);
+ if (env->ExceptionCheck()) {
+ // exception occurred
return nullptr;
}
- static jmethodID mid = env->GetMethodID(jclazz, "merge", "([B[B)V");
- assert(mid != nullptr);
- return mid;
+ return jlong_obj;
}
+};
+// The portal class for java.lang.StringBuilder
+class StringBuilderJni : public JavaClass {
+ public:
/**
- * Get the Java Method: WriteBatch.Handler#delete
+ * Get the Java Class java.lang.StringBuilder
*
* @param env A pointer to the Java environment
*
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
- static jmethodID getDeleteCfMethodId(JNIEnv* env) {
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
- // exception occurred accessing class
- return nullptr;
- }
-
- static jmethodID mid = env->GetMethodID(jclazz, "delete", "(I[B)V");
- assert(mid != nullptr);
- return mid;
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "java/lang/StringBuilder");
}
/**
- * Get the Java Method: WriteBatch.Handler#delete
+ * Get the Java Method: StringBuilder#append
*
* @param env A pointer to the Java environment
*
* @return The Java Method ID or nullptr if the class or method id could not
* be retieved
*/
- static jmethodID getDeleteMethodId(JNIEnv* env) {
+ static jmethodID getListAddMethodId(JNIEnv* env) {
jclass jclazz = getJClass(env);
if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- static jmethodID mid = env->GetMethodID(jclazz, "delete", "([B)V");
+ static jmethodID mid =
+ env->GetMethodID(jclazz, "append",
+ "(Ljava/lang/String;)Ljava/lang/StringBuilder;");
assert(mid != nullptr);
return mid;
}
/**
- * Get the Java Method: WriteBatch.Handler#singleDelete
- *
- * @param env A pointer to the Java environment
- *
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
- */
- static jmethodID getSingleDeleteCfMethodId(JNIEnv* env) {
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
- // exception occurred accessing class
- return nullptr;
- }
-
- static jmethodID mid = env->GetMethodID(jclazz, "singleDelete", "(I[B)V");
- assert(mid != nullptr);
- return mid;
- }
-
- /**
- * Get the Java Method: WriteBatch.Handler#singleDelete
+ * Appends a C-style string to a StringBuilder
*
* @param env A pointer to the Java environment
+ * @param jstring_builder Reference to a java.lang.StringBuilder
+ * @param c_str A C-style string to append to the StringBuilder
*
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
+ * @return A reference to the updated StringBuilder, or a nullptr if
+ * an exception occurs
*/
- static jmethodID getSingleDeleteMethodId(JNIEnv* env) {
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
- // exception occurred accessing class
+ static jobject append(JNIEnv* env, jobject jstring_builder,
+ const char* c_str) {
+ jmethodID mid = getListAddMethodId(env);
+ if(mid == nullptr) {
+ // exception occurred accessing class or method
return nullptr;
}
- static jmethodID mid = env->GetMethodID(jclazz, "singleDelete", "([B)V");
- assert(mid != nullptr);
- return mid;
- }
-
- /**
- * Get the Java Method: WriteBatch.Handler#deleteRange
- *
- * @param env A pointer to the Java environment
- *
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
- */
- static jmethodID getDeleteRangeCfMethodId(JNIEnv* env) {
- jclass jclazz = getJClass(env);
- if (jclazz == nullptr) {
- // exception occurred accessing class
+ jstring new_value_str = env->NewStringUTF(c_str);
+ if(new_value_str == nullptr) {
+ // exception thrown: OutOfMemoryError
return nullptr;
}
- static jmethodID mid = env->GetMethodID(jclazz, "deleteRange", "(I[B[B)V");
- assert(mid != nullptr);
- return mid;
- }
-
- /**
- * Get the Java Method: WriteBatch.Handler#deleteRange
- *
- * @param env A pointer to the Java environment
- *
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
- */
- static jmethodID getDeleteRangeMethodId(JNIEnv* env) {
- jclass jclazz = getJClass(env);
- if (jclazz == nullptr) {
- // exception occurred accessing class
+ jobject jresult_string_builder =
+ env->CallObjectMethod(jstring_builder, mid, new_value_str);
+ if(env->ExceptionCheck()) {
+ // exception occurred
+ env->DeleteLocalRef(new_value_str);
return nullptr;
}
- static jmethodID mid = env->GetMethodID(jclazz, "deleteRange", "([B[B)V");
- assert(mid != nullptr);
- return mid;
+ return jresult_string_builder;
}
+};
- /**
- * Get the Java Method: WriteBatch.Handler#logData
- *
- * @param env A pointer to the Java environment
- *
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
- */
- static jmethodID getLogDataMethodId(JNIEnv* env) {
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
- // exception occurred accessing class
- return nullptr;
+// various utility functions for working with RocksDB and JNI
+class JniUtil {
+ public:
+ /**
+ * Detect if jlong overflows size_t
+ *
+ * @param jvalue the jlong value
+ *
+     * @return Status::OK() if the jlong value fits in a size_t, otherwise
+     *     Status::InvalidArgument
+ */
+ inline static Status check_if_jlong_fits_size_t(const jlong& jvalue) {
+ Status s = Status::OK();
+ if (static_cast<uint64_t>(jvalue) > std::numeric_limits<size_t>::max()) {
+ s = Status::InvalidArgument(Slice("jlong overflows 32 bit value."));
+ }
+ return s;
}
- static jmethodID mid = env->GetMethodID(jclazz, "logData", "([B)V");
- assert(mid != nullptr);
- return mid;
- }
-
- /**
- * Get the Java Method: WriteBatch.Handler#putBlobIndex
- *
- * @param env A pointer to the Java environment
- *
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
- */
- static jmethodID getPutBlobIndexCfMethodId(JNIEnv* env) {
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
- // exception occurred accessing class
- return nullptr;
- }
+ /**
+ * Obtains a reference to the JNIEnv from
+ * the JVM
+ *
+ * If the current thread is not attached to the JavaVM
+ * then it will be attached so as to retrieve the JNIEnv
+ *
+ * If a thread is attached, it must later be manually
+ * released by calling JavaVM::DetachCurrentThread.
+ * This can be handled by always matching calls to this
+ * function with calls to {@link JniUtil::releaseJniEnv(JavaVM*, jboolean)}
+ *
+ * @param jvm (IN) A pointer to the JavaVM instance
+ * @param attached (OUT) A pointer to a boolean which
+ * will be set to JNI_TRUE if we had to attach the thread
+ *
+ * @return A pointer to the JNIEnv or nullptr if a fatal error
+ * occurs and the JNIEnv cannot be retrieved
+ */
+ static JNIEnv* getJniEnv(JavaVM* jvm, jboolean* attached) {
+ assert(jvm != nullptr);
- static jmethodID mid = env->GetMethodID(jclazz, "putBlobIndex", "(I[B[B)V");
- assert(mid != nullptr);
- return mid;
- }
+ JNIEnv *env;
+ const jint env_rs = jvm->GetEnv(reinterpret_cast<void**>(&env),
+ JNI_VERSION_1_2);
- /**
- * Get the Java Method: WriteBatch.Handler#markBeginPrepare
- *
- * @param env A pointer to the Java environment
- *
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
- */
- static jmethodID getMarkBeginPrepareMethodId(JNIEnv* env) {
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
- // exception occurred accessing class
- return nullptr;
+ if(env_rs == JNI_OK) {
+ // current thread is already attached, return the JNIEnv
+ *attached = JNI_FALSE;
+ return env;
+ } else if(env_rs == JNI_EDETACHED) {
+ // current thread is not attached, attempt to attach
+ const jint rs_attach = jvm->AttachCurrentThread(reinterpret_cast<void**>(&env), NULL);
+ if(rs_attach == JNI_OK) {
+ *attached = JNI_TRUE;
+ return env;
+ } else {
+ // error, could not attach the thread
+ std::cerr << "JniUtil::getJniEnv - Fatal: could not attach current thread to JVM!" << std::endl;
+ return nullptr;
+ }
+ } else if(env_rs == JNI_EVERSION) {
+ // error, JDK does not support JNI_VERSION_1_2+
+ std::cerr << "JniUtil::getJniEnv - Fatal: JDK does not support JNI_VERSION_1_2" << std::endl;
+ return nullptr;
+ } else {
+ std::cerr << "JniUtil::getJniEnv - Fatal: Unknown error: env_rs=" << env_rs << std::endl;
+ return nullptr;
+ }
}
- static jmethodID mid = env->GetMethodID(jclazz, "markBeginPrepare", "()V");
- assert(mid != nullptr);
- return mid;
- }
-
- /**
- * Get the Java Method: WriteBatch.Handler#markEndPrepare
- *
- * @param env A pointer to the Java environment
- *
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
- */
- static jmethodID getMarkEndPrepareMethodId(JNIEnv* env) {
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
- // exception occurred accessing class
- return nullptr;
+ /**
+ * Counterpart to {@link JniUtil::getJniEnv(JavaVM*, jboolean*)}
+ *
+     * Detaches the current thread from the JVM if it was previously
+ * attached
+ *
+ * @param jvm (IN) A pointer to the JavaVM instance
+ * @param attached (IN) JNI_TRUE if we previously had to attach the thread
+ * to the JavaVM to get the JNIEnv
+ */
+ static void releaseJniEnv(JavaVM* jvm, jboolean& attached) {
+ assert(jvm != nullptr);
+ if(attached == JNI_TRUE) {
+ const jint rs_detach = jvm->DetachCurrentThread();
+ assert(rs_detach == JNI_OK);
+ if(rs_detach != JNI_OK) {
+ std::cerr << "JniUtil::getJniEnv - Warn: Unable to detach current thread from JVM!" << std::endl;
+ }
+ }
}
- static jmethodID mid = env->GetMethodID(jclazz, "markEndPrepare", "([B)V");
- assert(mid != nullptr);
- return mid;
- }
-
- /**
- * Get the Java Method: WriteBatch.Handler#markNoop
- *
- * @param env A pointer to the Java environment
- *
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
- */
- static jmethodID getMarkNoopMethodId(JNIEnv* env) {
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
- // exception occurred accessing class
- return nullptr;
+ /**
+ * Copies a Java String[] to a C++ std::vector<std::string>
+ *
+ * @param env (IN) A pointer to the java environment
+ * @param jss (IN) The Java String array to copy
+ * @param has_exception (OUT) will be set to JNI_TRUE
+ * if an OutOfMemoryError or ArrayIndexOutOfBoundsException
+ * exception occurs
+ *
+     * @return A std::vector<std::string> containing copies of the Java strings
+ */
+ static std::vector<std::string> copyStrings(JNIEnv* env,
+ jobjectArray jss, jboolean* has_exception) {
+ return rocksdb::JniUtil::copyStrings(env, jss,
+ env->GetArrayLength(jss), has_exception);
}
- static jmethodID mid = env->GetMethodID(jclazz, "markNoop", "(Z)V");
- assert(mid != nullptr);
- return mid;
- }
-
- /**
- * Get the Java Method: WriteBatch.Handler#markRollback
- *
- * @param env A pointer to the Java environment
- *
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
- */
- static jmethodID getMarkRollbackMethodId(JNIEnv* env) {
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
- // exception occurred accessing class
- return nullptr;
- }
+ /**
+ * Copies a Java String[] to a C++ std::vector<std::string>
+ *
+ * @param env (IN) A pointer to the java environment
+ * @param jss (IN) The Java String array to copy
+ * @param jss_len (IN) The length of the Java String array to copy
+ * @param has_exception (OUT) will be set to JNI_TRUE
+ * if an OutOfMemoryError or ArrayIndexOutOfBoundsException
+ * exception occurs
+ *
+     * @return A std::vector<std::string> containing copies of the Java strings
+ */
+ static std::vector<std::string> copyStrings(JNIEnv* env,
+ jobjectArray jss, const jsize jss_len, jboolean* has_exception) {
+ std::vector<std::string> strs;
+ strs.reserve(jss_len);
+ for (jsize i = 0; i < jss_len; i++) {
+ jobject js = env->GetObjectArrayElement(jss, i);
+ if(env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ *has_exception = JNI_TRUE;
+ return strs;
+ }
- static jmethodID mid = env->GetMethodID(jclazz, "markRollback", "([B)V");
- assert(mid != nullptr);
- return mid;
- }
+ jstring jstr = static_cast<jstring>(js);
+ const char* str = env->GetStringUTFChars(jstr, nullptr);
+ if(str == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->DeleteLocalRef(js);
+ *has_exception = JNI_TRUE;
+ return strs;
+ }
- /**
- * Get the Java Method: WriteBatch.Handler#markCommit
- *
- * @param env A pointer to the Java environment
- *
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
- */
- static jmethodID getMarkCommitMethodId(JNIEnv* env) {
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
- // exception occurred accessing class
- return nullptr;
- }
+ strs.push_back(std::string(str));
- static jmethodID mid = env->GetMethodID(jclazz, "markCommit", "([B)V");
- assert(mid != nullptr);
- return mid;
- }
+ env->ReleaseStringUTFChars(jstr, str);
+ env->DeleteLocalRef(js);
+ }
- /**
- * Get the Java Method: WriteBatch.Handler#shouldContinue
- *
- * @param env A pointer to the Java environment
- *
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
- */
- static jmethodID getContinueMethodId(JNIEnv* env) {
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
- // exception occurred accessing class
- return nullptr;
+ *has_exception = JNI_FALSE;
+ return strs;
}
- static jmethodID mid = env->GetMethodID(jclazz, "shouldContinue", "()Z");
- assert(mid != nullptr);
- return mid;
- }
-};
-
-class WriteBatchSavePointJni : public JavaClass {
- public:
- /**
- * Get the Java Class org.rocksdb.WriteBatch.SavePoint
- *
- * @param env A pointer to the Java environment
- *
- * @return The Java Class or nullptr if one of the
- * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
- * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
- */
- static jclass getJClass(JNIEnv* env) {
- return JavaClass::getJClass(env, "org/rocksdb/WriteBatch$SavePoint");
- }
+ /**
+ * Copies a jstring to a C-style null-terminated byte string
+ * and releases the original jstring
+ *
+ * The jstring is copied as UTF-8
+ *
+ * If an exception occurs, then JNIEnv::ExceptionCheck()
+ * will have been called
+ *
+ * @param env (IN) A pointer to the java environment
+ * @param js (IN) The java string to copy
+ * @param has_exception (OUT) will be set to JNI_TRUE
+ * if an OutOfMemoryError exception occurs
+ *
+ * @return A pointer to the copied string, or a
+ * nullptr if has_exception == JNI_TRUE
+ */
+ static std::unique_ptr<char[]> copyString(JNIEnv* env, jstring js,
+ jboolean* has_exception) {
+ const char *utf = env->GetStringUTFChars(js, nullptr);
+ if(utf == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->ExceptionCheck();
+ *has_exception = JNI_TRUE;
+ return nullptr;
+ } else if(env->ExceptionCheck()) {
+ // exception thrown
+ env->ReleaseStringUTFChars(js, utf);
+ *has_exception = JNI_TRUE;
+ return nullptr;
+ }
- /**
- * Get the Java Method: HistogramData constructor
- *
- * @param env A pointer to the Java environment
- *
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
- */
- static jmethodID getConstructorMethodId(JNIEnv* env) {
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
- // exception occurred accessing class
- return nullptr;
+ const jsize utf_len = env->GetStringUTFLength(js);
+ std::unique_ptr<char[]> str(new char[utf_len + 1]); // Note: + 1 is needed for the c_str null terminator
+ std::strcpy(str.get(), utf);
+ env->ReleaseStringUTFChars(js, utf);
+ *has_exception = JNI_FALSE;
+ return str;
}
- static jmethodID mid = env->GetMethodID(jclazz, "<init>", "(JJJ)V");
- assert(mid != nullptr);
- return mid;
- }
+ /**
+ * Copies a jstring to a std::string
+ * and releases the original jstring
+ *
+ * If an exception occurs, then JNIEnv::ExceptionCheck()
+ * will have been called
+ *
+ * @param env (IN) A pointer to the java environment
+ * @param js (IN) The java string to copy
+ * @param has_exception (OUT) will be set to JNI_TRUE
+ * if an OutOfMemoryError exception occurs
+ *
+ * @return A std:string copy of the jstring, or an
+ * empty std::string if has_exception == JNI_TRUE
+ */
+ static std::string copyStdString(JNIEnv* env, jstring js,
+ jboolean* has_exception) {
+ const char *utf = env->GetStringUTFChars(js, nullptr);
+ if(utf == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->ExceptionCheck();
+ *has_exception = JNI_TRUE;
+ return std::string();
+ } else if(env->ExceptionCheck()) {
+ // exception thrown
+ env->ReleaseStringUTFChars(js, utf);
+ *has_exception = JNI_TRUE;
+ return std::string();
+ }
- /**
- * Create a new Java org.rocksdb.WriteBatch.SavePoint object
- *
- * @param env A pointer to the Java environment
- * @param savePoint A pointer to rocksdb::WriteBatch::SavePoint object
- *
- * @return A reference to a Java org.rocksdb.WriteBatch.SavePoint object, or
- * nullptr if an an exception occurs
- */
- static jobject construct(JNIEnv* env, const SavePoint &save_point) {
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
- // exception occurred accessing class
- return nullptr;
+ std::string name(utf);
+ env->ReleaseStringUTFChars(js, utf);
+ *has_exception = JNI_FALSE;
+ return name;
}
- jmethodID mid = getConstructorMethodId(env);
- if (mid == nullptr) {
- // exception thrown: NoSuchMethodException or OutOfMemoryError
- return nullptr;
+ /**
+ * Copies bytes from a std::string to a jByteArray
+ *
+ * @param env A pointer to the java environment
+ * @param bytes The bytes to copy
+ *
+ * @return the Java byte[], or nullptr if an exception occurs
+ *
+ * @throws RocksDBException thrown
+ * if memory size to copy exceeds general java specific array size limitation.
+ */
+ static jbyteArray copyBytes(JNIEnv* env, std::string bytes) {
+ return createJavaByteArrayWithSizeCheck(env, bytes.c_str(), bytes.size());
}
- jobject jsave_point = env->NewObject(jclazz, mid,
- static_cast<jlong>(save_point.size),
- static_cast<jlong>(save_point.count),
- static_cast<jlong>(save_point.content_flags));
- if (env->ExceptionCheck()) {
- return nullptr;
- }
+ /**
+ * Given a Java byte[][] which is an array of java.lang.Strings
+ * where each String is a byte[], the passed function `string_fn`
+ * will be called on each String, the result is then collected by
+ * calling the passed function `collector_fn`
+ *
+ * @param env (IN) A pointer to the java environment
+ * @param jbyte_strings (IN) A Java array of Strings expressed as bytes
+ * @param string_fn (IN) A transform function to call for each String
+ * @param collector_fn (IN) A collector which is called for the result
+ * of each `string_fn`
+ * @param has_exception (OUT) will be set to JNI_TRUE
+ * if an ArrayIndexOutOfBoundsException or OutOfMemoryError
+ * exception occurs
+ */
+ template <typename T> static void byteStrings(JNIEnv* env,
+ jobjectArray jbyte_strings,
+ std::function<T(const char*, const size_t)> string_fn,
+ std::function<void(size_t, T)> collector_fn,
+ jboolean *has_exception) {
+ const jsize jlen = env->GetArrayLength(jbyte_strings);
- return jsave_point;
- }
-};
+ for(jsize i = 0; i < jlen; i++) {
+ jobject jbyte_string_obj = env->GetObjectArrayElement(jbyte_strings, i);
+ if(env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ *has_exception = JNI_TRUE; // signal error
+ return;
+ }
-// The portal class for org.rocksdb.WriteBatchWithIndex
-class WriteBatchWithIndexJni : public RocksDBNativeClass<
- rocksdb::WriteBatchWithIndex*, WriteBatchWithIndexJni> {
+ jbyteArray jbyte_string_ary =
+ reinterpret_cast<jbyteArray>(jbyte_string_obj);
+ T result = byteString(env, jbyte_string_ary, string_fn, has_exception);
+
+ env->DeleteLocalRef(jbyte_string_obj);
+
+ if(*has_exception == JNI_TRUE) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ collector_fn(i, result);
+ }
+
+ *has_exception = JNI_FALSE;
+ }
+
+ /**
+ * Given a Java String which is expressed as a Java Byte Array byte[],
+ * the passed function `string_fn` will be called on the String
+ * and the result returned
+ *
+ * @param env (IN) A pointer to the java environment
+ * @param jbyte_string_ary (IN) A Java String expressed in bytes
+ * @param string_fn (IN) A transform function to call on the String
+ * @param has_exception (OUT) will be set to JNI_TRUE
+ * if an OutOfMemoryError exception occurs
+ */
+ template <typename T> static T byteString(JNIEnv* env,
+ jbyteArray jbyte_string_ary,
+ std::function<T(const char*, const size_t)> string_fn,
+ jboolean* has_exception) {
+ const jsize jbyte_string_len = env->GetArrayLength(jbyte_string_ary);
+ return byteString<T>(env, jbyte_string_ary, jbyte_string_len, string_fn,
+ has_exception);
+ }
+
+ /**
+ * Given a Java String which is expressed as a Java Byte Array byte[],
+ * the passed function `string_fn` will be called on the String
+ * and the result returned
+ *
+ * @param env (IN) A pointer to the java environment
+ * @param jbyte_string_ary (IN) A Java String expressed in bytes
+ * @param jbyte_string_len (IN) The length of the Java String
+ * expressed in bytes
+ * @param string_fn (IN) A transform function to call on the String
+ * @param has_exception (OUT) will be set to JNI_TRUE
+ * if an OutOfMemoryError exception occurs
+ */
+ template <typename T> static T byteString(JNIEnv* env,
+ jbyteArray jbyte_string_ary, const jsize jbyte_string_len,
+ std::function<T(const char*, const size_t)> string_fn,
+ jboolean* has_exception) {
+ jbyte* jbyte_string =
+ env->GetByteArrayElements(jbyte_string_ary, nullptr);
+ if(jbyte_string == nullptr) {
+ // exception thrown: OutOfMemoryError
+ *has_exception = JNI_TRUE;
+ return nullptr; // signal error
+ }
+
+ T result =
+ string_fn(reinterpret_cast<char *>(jbyte_string), jbyte_string_len);
+
+ env->ReleaseByteArrayElements(jbyte_string_ary, jbyte_string, JNI_ABORT);
+
+ *has_exception = JNI_FALSE;
+ return result;
+ }
+
+ /**
+ * Converts a std::vector<string> to a Java byte[][] where each Java String
+ * is expressed as a Java Byte Array byte[].
+ *
+ * @param env A pointer to the java environment
+ * @param strings A vector of Strings
+ *
+ * @return A Java array of Strings expressed as bytes,
+ * or nullptr if an exception is thrown
+ */
+ static jobjectArray stringsBytes(JNIEnv* env, std::vector<std::string> strings) {
+ jclass jcls_ba = ByteJni::getArrayJClass(env);
+ if(jcls_ba == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ const jsize len = static_cast<jsize>(strings.size());
+
+ jobjectArray jbyte_strings = env->NewObjectArray(len, jcls_ba, nullptr);
+ if(jbyte_strings == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ for (jsize i = 0; i < len; i++) {
+ std::string *str = &strings[i];
+ const jsize str_len = static_cast<jsize>(str->size());
+
+ jbyteArray jbyte_string_ary = env->NewByteArray(str_len);
+ if(jbyte_string_ary == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->DeleteLocalRef(jbyte_strings);
+ return nullptr;
+ }
+
+ env->SetByteArrayRegion(
+ jbyte_string_ary, 0, str_len,
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(str->c_str())));
+ if(env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jbyte_string_ary);
+ env->DeleteLocalRef(jbyte_strings);
+ return nullptr;
+ }
+
+ env->SetObjectArrayElement(jbyte_strings, i, jbyte_string_ary);
+ if(env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ // or ArrayStoreException
+ env->DeleteLocalRef(jbyte_string_ary);
+ env->DeleteLocalRef(jbyte_strings);
+ return nullptr;
+ }
+
+ env->DeleteLocalRef(jbyte_string_ary);
+ }
+
+ return jbyte_strings;
+ }
+
+ /**
+ * Converts a std::vector<std::string> to a Java String[].
+ *
+ * @param env A pointer to the java environment
+ * @param strings A vector of Strings
+ *
+ * @return A Java array of Strings,
+ * or nullptr if an exception is thrown
+ */
+ static jobjectArray toJavaStrings(JNIEnv* env,
+ const std::vector<std::string>* strings) {
+ jclass jcls_str = env->FindClass("java/lang/String");
+ if(jcls_str == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ const jsize len = static_cast<jsize>(strings->size());
+
+ jobjectArray jstrings = env->NewObjectArray(len, jcls_str, nullptr);
+ if(jstrings == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ for (jsize i = 0; i < len; i++) {
+ const std::string *str = &((*strings)[i]);
+ jstring js = rocksdb::JniUtil::toJavaString(env, str);
+ if (js == nullptr) {
+ env->DeleteLocalRef(jstrings);
+ return nullptr;
+ }
+
+ env->SetObjectArrayElement(jstrings, i, js);
+ if(env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ // or ArrayStoreException
+ env->DeleteLocalRef(js);
+ env->DeleteLocalRef(jstrings);
+ return nullptr;
+ }
+ }
+
+ return jstrings;
+ }
+
+ /**
+ * Creates a Java UTF String from a C++ std::string
+ *
+ * @param env A pointer to the java environment
+ * @param string the C++ std::string
+ * @param treat_empty_as_null true if empty strings should be treated as null
+ *
+ * @return the Java UTF string, or nullptr if the provided string
+ * is null (or empty and treat_empty_as_null is set), or if an
+ * exception occurs allocating the Java String.
+ */
+ static jstring toJavaString(JNIEnv* env, const std::string* string,
+ const bool treat_empty_as_null = false) {
+ if (string == nullptr) {
+ return nullptr;
+ }
+
+ if (treat_empty_as_null && string->empty()) {
+ return nullptr;
+ }
+
+ return env->NewStringUTF(string->c_str());
+ }
+
+ /**
+ * Copies bytes to a new jByteArray with the check of java array size limitation.
+ *
+ * @param bytes pointer to memory to copy to a new jByteArray
+ * @param size number of bytes to copy
+ *
+ * @return the Java byte[], or nullptr if an exception occurs
+ *
+ * @throws RocksDBException thrown
+ * if memory size to copy exceeds general java array size limitation to avoid overflow.
+ */
+ static jbyteArray createJavaByteArrayWithSizeCheck(JNIEnv* env, const char* bytes, const size_t size) {
+ // Limitation for java array size is vm specific
+ // In general it cannot exceed Integer.MAX_VALUE (2^31 - 1)
+ // Current HotSpot VM limitation for array size is Integer.MAX_VALUE - 5 (2^31 - 1 - 5)
+ // It means that the next call to env->NewByteArray can still end with
+ // OutOfMemoryError("Requested array size exceeds VM limit") coming from VM
+ static const size_t MAX_JARRAY_SIZE = (static_cast<size_t>(1)) << 31;
+ if(size > MAX_JARRAY_SIZE) {
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, "Requested array size exceeds VM limit");
+ return nullptr;
+ }
+
+ const jsize jlen = static_cast<jsize>(size);
+ jbyteArray jbytes = env->NewByteArray(jlen);
+ if(jbytes == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ env->SetByteArrayRegion(jbytes, 0, jlen,
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(bytes)));
+ if(env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jbytes);
+ return nullptr;
+ }
+
+ return jbytes;
+ }
+
+ /**
+ * Copies bytes from a rocksdb::Slice to a jByteArray
+ *
+ * @param env A pointer to the java environment
+ * @param bytes The bytes to copy
+ *
+ * @return the Java byte[] or nullptr if an exception occurs
+ *
+ * @throws RocksDBException thrown
+ * if memory size to copy exceeds general java specific array size limitation.
+ */
+ static jbyteArray copyBytes(JNIEnv* env, const Slice& bytes) {
+ return createJavaByteArrayWithSizeCheck(env, bytes.data(), bytes.size());
+ }
+
+ /*
+ * Helper for operations on a key and value
+ * for example WriteBatch->Put
+ *
+ * TODO(AR) could be used for RocksDB->Put etc.
+ */
+ static std::unique_ptr<rocksdb::Status> kv_op(
+ std::function<rocksdb::Status(rocksdb::Slice, rocksdb::Slice)> op,
+ JNIEnv* env, jobject /*jobj*/,
+ jbyteArray jkey, jint jkey_len,
+ jbyteArray jvalue, jint jvalue_len) {
+ jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+ if(env->ExceptionCheck()) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ jbyte* value = env->GetByteArrayElements(jvalue, nullptr);
+ if(env->ExceptionCheck()) {
+ // exception thrown: OutOfMemoryError
+ if(key != nullptr) {
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+ }
+ return nullptr;
+ }
+
+ rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+ rocksdb::Slice value_slice(reinterpret_cast<char*>(value),
+ jvalue_len);
+
+ auto status = op(key_slice, value_slice);
+
+ if(value != nullptr) {
+ env->ReleaseByteArrayElements(jvalue, value, JNI_ABORT);
+ }
+ if(key != nullptr) {
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+ }
+
+ return std::unique_ptr<rocksdb::Status>(new rocksdb::Status(status));
+ }
+
+ /*
+ * Helper for operations on a key
+ * for example WriteBatch->Delete
+ *
+ * TODO(AR) could be used for RocksDB->Delete etc.
+ */
+ static std::unique_ptr<rocksdb::Status> k_op(
+ std::function<rocksdb::Status(rocksdb::Slice)> op,
+ JNIEnv* env, jobject /*jobj*/,
+ jbyteArray jkey, jint jkey_len) {
+ jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+ if(env->ExceptionCheck()) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+
+ auto status = op(key_slice);
+
+ if(key != nullptr) {
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+ }
+
+ return std::unique_ptr<rocksdb::Status>(new rocksdb::Status(status));
+ }
+
+ /*
+ * Helper for operations on a value
+ * for example WriteBatchWithIndex->GetFromBatch
+ */
+ static jbyteArray v_op(
+ std::function<rocksdb::Status(rocksdb::Slice, std::string*)> op,
+ JNIEnv* env, jbyteArray jkey, jint jkey_len) {
+ jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+ if(env->ExceptionCheck()) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+
+ std::string value;
+ rocksdb::Status s = op(key_slice, &value);
+
+ if(key != nullptr) {
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+ }
+
+ if (s.IsNotFound()) {
+ return nullptr;
+ }
+
+ if (s.ok()) {
+ jbyteArray jret_value =
+ env->NewByteArray(static_cast<jsize>(value.size()));
+ if(jret_value == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ env->SetByteArrayRegion(jret_value, 0, static_cast<jsize>(value.size()),
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(value.c_str())));
+ if(env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ if(jret_value != nullptr) {
+ env->DeleteLocalRef(jret_value);
+ }
+ return nullptr;
+ }
+
+ return jret_value;
+ }
+
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
+
+ /**
+ * Creates a vector<T*> of C++ pointers from
+ * a Java array of C++ pointer addresses.
+ *
+ * @param env (IN) A pointer to the java environment
+ * @param pointers (IN) A Java array of C++ pointer addresses
+ * @param has_exception (OUT) will be set to JNI_TRUE
+ * if an ArrayIndexOutOfBoundsException or OutOfMemoryError
+ * exception occurs.
+ *
+ * @return A vector of C++ pointers.
+ */
+ template<typename T> static std::vector<T*> fromJPointers(
+ JNIEnv* env, jlongArray jptrs, jboolean *has_exception) {
+ const jsize jptrs_len = env->GetArrayLength(jptrs);
+ std::vector<T*> ptrs;
+ jlong* jptr = env->GetLongArrayElements(jptrs, nullptr);
+ if (jptr == nullptr) {
+ // exception thrown: OutOfMemoryError
+ *has_exception = JNI_TRUE;
+ return ptrs;
+ }
+ ptrs.reserve(jptrs_len);
+ for (jsize i = 0; i < jptrs_len; i++) {
+ ptrs.push_back(reinterpret_cast<T*>(jptr[i]));
+ }
+ env->ReleaseLongArrayElements(jptrs, jptr, JNI_ABORT);
+ return ptrs;
+ }
+
+ /**
+ * Creates a Java array of C++ pointer addresses
+ * from a vector of C++ pointers.
+ *
+ * @param env (IN) A pointer to the java environment
+ * @param pointers (IN) A vector of C++ pointers
+ * @param has_exception (OUT) will be set to JNI_TRUE
+ * if an ArrayIndexOutOfBoundsException or OutOfMemoryError
+ * exception occurs
+ *
+ * @return Java array of C++ pointer addresses.
+ */
+ template<typename T> static jlongArray toJPointers(JNIEnv* env,
+ const std::vector<T*> &pointers,
+ jboolean *has_exception) {
+ const jsize len = static_cast<jsize>(pointers.size());
+ std::unique_ptr<jlong[]> results(new jlong[len]);
+ std::transform(pointers.begin(), pointers.end(), results.get(), [](T* pointer) -> jlong {
+ return reinterpret_cast<jlong>(pointer);
+ });
+
+ jlongArray jpointers = env->NewLongArray(len);
+ if (jpointers == nullptr) {
+ // exception thrown: OutOfMemoryError
+ *has_exception = JNI_TRUE;
+ return nullptr;
+ }
+
+ env->SetLongArrayRegion(jpointers, 0, len, results.get());
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ *has_exception = JNI_TRUE;
+ env->DeleteLocalRef(jpointers);
+ return nullptr;
+ }
+
+ *has_exception = JNI_FALSE;
+
+ return jpointers;
+ }
+};
+
+class MapJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class java.util.Map
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "java/util/Map");
+ }
+
+ /**
+ * Get the Java Method: Map#put
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getMapPutMethodId(JNIEnv* env) {
+ jclass jlist_clazz = getJClass(env);
+ if(jlist_clazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetMethodID(jlist_clazz, "put", "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;");
+ assert(mid != nullptr);
+ return mid;
+ }
+};
+
+class HashMapJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class java.util.HashMap
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "java/util/HashMap");
+ }
+
+ /**
+ * Create a new Java java.util.HashMap object.
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return A reference to a Java java.util.HashMap object, or
+ * nullptr if an an exception occurs
+ */
+ static jobject construct(JNIEnv* env, const uint32_t initial_capacity = 16) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "(I)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ jobject jhash_map = env->NewObject(jclazz, mid, static_cast<jint>(initial_capacity));
+ if (env->ExceptionCheck()) {
+ return nullptr;
+ }
+
+ return jhash_map;
+ }
+
+ /**
+ * A function which maps a std::pair<K,V> to a std::pair<JK, JV>
+ *
+ * @return Either a pointer to a std::pair<jobject, jobject>, or nullptr
+ * if an error occurs during the mapping
+ */
+ template <typename K, typename V, typename JK, typename JV>
+ using FnMapKV = std::function<std::unique_ptr<std::pair<JK, JV>> (const std::pair<K, V>&)>;
+
+ // template <class I, typename K, typename V, typename K1, typename V1, typename std::enable_if<std::is_same<typename std::iterator_traits<I>::value_type, std::pair<const K,V>>::value, int32_t>::type = 0>
+ // static void putAll(JNIEnv* env, const jobject jhash_map, I iterator, const FnMapKV<const K,V,K1,V1> &fn_map_kv) {
+ /**
+ * Returns true if it succeeds, false if an error occurs
+ */
+ template<class iterator_type, typename K, typename V>
+ static bool putAll(JNIEnv* env, const jobject jhash_map, iterator_type iterator, iterator_type end, const FnMapKV<K, V, jobject, jobject> &fn_map_kv) {
+ const jmethodID jmid_put = rocksdb::MapJni::getMapPutMethodId(env);
+ if (jmid_put == nullptr) {
+ return false;
+ }
+
+ for (auto it = iterator; it != end; ++it) {
+ const std::unique_ptr<std::pair<jobject, jobject>> result = fn_map_kv(*it);
+ if (result == nullptr) {
+ // an error occurred during fn_map_kv
+ return false;
+ }
+ env->CallObjectMethod(jhash_map, jmid_put, result->first, result->second);
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ env->DeleteLocalRef(result->second);
+ env->DeleteLocalRef(result->first);
+ return false;
+ }
+
+ // release local references
+ env->DeleteLocalRef(result->second);
+ env->DeleteLocalRef(result->first);
+ }
+
+ return true;
+ }
+
+ /**
+ * Creates a java.util.Map<String, String> from a std::map<std::string, std::string>
+ *
+ * @param env A pointer to the Java environment
+ * @param map the Cpp map
+ *
+ * @return a reference to the Java java.util.Map object, or nullptr if an exception occurred
+ */
+ static jobject fromCppMap(JNIEnv* env, const std::map<std::string, std::string>* map) {
+ if (map == nullptr) {
+ return nullptr;
+ }
+
+ jobject jhash_map = construct(env, static_cast<uint32_t>(map->size()));
+ if (jhash_map == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ const rocksdb::HashMapJni::FnMapKV<const std::string, const std::string, jobject, jobject> fn_map_kv =
+ [env](const std::pair<const std::string, const std::string>& kv) {
+ jstring jkey = rocksdb::JniUtil::toJavaString(env, &(kv.first), false);
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ jstring jvalue = rocksdb::JniUtil::toJavaString(env, &(kv.second), true);
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ env->DeleteLocalRef(jkey);
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ return std::unique_ptr<std::pair<jobject, jobject>>(new std::pair<jobject, jobject>(static_cast<jobject>(jkey), static_cast<jobject>(jvalue)));
+ };
+
+ if (!putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
+ // exception occurred
+ return nullptr;
+ }
+
+ return jhash_map;
+ }
+
+ /**
+ * Creates a java.util.Map&lt;String, Integer&gt; from a std::map&lt;std::string, uint32_t&gt;
+ *
+ * @param env A pointer to the Java environment
+ * @param map the Cpp map
+ *
+ * @return a reference to the Java java.util.Map object, or nullptr if an exception occurred
+ */
+ static jobject fromCppMap(JNIEnv* env, const std::map<std::string, uint32_t>* map) {
+ if (map == nullptr) {
+ return nullptr;
+ }
+
+ if (map == nullptr) {
+ return nullptr;
+ }
+
+ jobject jhash_map = construct(env, static_cast<uint32_t>(map->size()));
+ if (jhash_map == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ const rocksdb::HashMapJni::FnMapKV<const std::string, const uint32_t, jobject, jobject> fn_map_kv =
+ [env](const std::pair<const std::string, const uint32_t>& kv) {
+ jstring jkey = rocksdb::JniUtil::toJavaString(env, &(kv.first), false);
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ jobject jvalue = rocksdb::IntegerJni::valueOf(env, static_cast<jint>(kv.second));
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ env->DeleteLocalRef(jkey);
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ return std::unique_ptr<std::pair<jobject, jobject>>(new std::pair<jobject, jobject>(static_cast<jobject>(jkey), jvalue));
+ };
+
+ if (!putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
+ // exception occurred
+ return nullptr;
+ }
+
+ return jhash_map;
+ }
+
+ /**
+ * Creates a java.util.Map<String, Long> from a std::map<std::string, uint64_t>
+ *
+ * @param env A pointer to the Java environment
+ * @param map the Cpp map
+ *
+ * @return a reference to the Java java.util.Map object, or nullptr if an exception occurred
+ */
+ static jobject fromCppMap(JNIEnv* env, const std::map<std::string, uint64_t>* map) {
+ if (map == nullptr) {
+ return nullptr;
+ }
+
+ jobject jhash_map = construct(env, static_cast<uint32_t>(map->size()));
+ if (jhash_map == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ const rocksdb::HashMapJni::FnMapKV<const std::string, const uint64_t, jobject, jobject> fn_map_kv =
+ [env](const std::pair<const std::string, const uint64_t>& kv) {
+ jstring jkey = rocksdb::JniUtil::toJavaString(env, &(kv.first), false);
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ jobject jvalue = rocksdb::LongJni::valueOf(env, static_cast<jlong>(kv.second));
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ env->DeleteLocalRef(jkey);
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ return std::unique_ptr<std::pair<jobject, jobject>>(new std::pair<jobject, jobject>(static_cast<jobject>(jkey), jvalue));
+ };
+
+ if (!putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
+ // exception occurred
+ return nullptr;
+ }
+
+ return jhash_map;
+ }
+
+ /**
+ * Creates a java.util.Map&lt;Integer, Long&gt; from a std::map&lt;uint32_t, uint64_t&gt;
+ *
+ * @param env A pointer to the Java environment
+ * @param map the Cpp map
+ *
+ * @return a reference to the Java java.util.Map object, or nullptr if an exception occurred
+ */
+ static jobject fromCppMap(JNIEnv* env, const std::map<uint32_t, uint64_t>* map) {
+ if (map == nullptr) {
+ return nullptr;
+ }
+
+ jobject jhash_map = construct(env, static_cast<uint32_t>(map->size()));
+ if (jhash_map == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ const rocksdb::HashMapJni::FnMapKV<const uint32_t, const uint64_t, jobject, jobject> fn_map_kv =
+ [env](const std::pair<const uint32_t, const uint64_t>& kv) {
+ jobject jkey = rocksdb::IntegerJni::valueOf(env, static_cast<jint>(kv.first));
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ jobject jvalue = rocksdb::LongJni::valueOf(env, static_cast<jlong>(kv.second));
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ env->DeleteLocalRef(jkey);
+ return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+ }
+
+ return std::unique_ptr<std::pair<jobject, jobject>>(new std::pair<jobject, jobject>(static_cast<jobject>(jkey), jvalue));
+ };
+
+ if (!putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
+ // exception occurred
+ return nullptr;
+ }
+
+ return jhash_map;
+ }
+};
+
+// The portal class for org.rocksdb.RocksDB
+class RocksDBJni : public RocksDBNativeClass<rocksdb::DB*, RocksDBJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.RocksDB
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/RocksDB");
+ }
+};
+
+// The portal class for org.rocksdb.Options
+class OptionsJni : public RocksDBNativeClass<
+ rocksdb::Options*, OptionsJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.Options
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/Options");
+ }
+};
+
+// The portal class for org.rocksdb.DBOptions
+class DBOptionsJni : public RocksDBNativeClass<
+ rocksdb::DBOptions*, DBOptionsJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.DBOptions
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/DBOptions");
+ }
+};
+
+// The portal class for org.rocksdb.ColumnFamilyOptions
+class ColumnFamilyOptionsJni
+ : public RocksDBNativeClass<rocksdb::ColumnFamilyOptions*,
+ ColumnFamilyOptionsJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.ColumnFamilyOptions
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/ColumnFamilyOptions");
+ }
+
+ /**
+ * Create a new Java org.rocksdb.ColumnFamilyOptions object with the same
+ * properties as the provided C++ rocksdb::ColumnFamilyOptions object
+ *
+ * @param env A pointer to the Java environment
+ * @param cfoptions A pointer to rocksdb::ColumnFamilyOptions object
+ *
+ * @return A reference to a Java org.rocksdb.ColumnFamilyOptions object, or
+ * nullptr if an exception occurs
+ */
+ static jobject construct(JNIEnv* env, const ColumnFamilyOptions* cfoptions) {
+ auto* cfo = new rocksdb::ColumnFamilyOptions(*cfoptions);
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "(J)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ jobject jcfd = env->NewObject(jclazz, mid, reinterpret_cast<jlong>(cfo));
+ if (env->ExceptionCheck()) {
+ return nullptr;
+ }
+
+ return jcfd;
+ }
+};
+
+// The portal class for org.rocksdb.WriteOptions
+class WriteOptionsJni : public RocksDBNativeClass<
+ rocksdb::WriteOptions*, WriteOptionsJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.WriteOptions
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/WriteOptions");
+ }
+};
+
+// The portal class for org.rocksdb.ReadOptions
+class ReadOptionsJni : public RocksDBNativeClass<
+ rocksdb::ReadOptions*, ReadOptionsJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.ReadOptions
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/ReadOptions");
+ }
+};
+
+// The portal class for org.rocksdb.WriteBatch
+class WriteBatchJni : public RocksDBNativeClass<
+ rocksdb::WriteBatch*, WriteBatchJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.WriteBatch
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/WriteBatch");
+ }
+
+ /**
+ * Create a new Java org.rocksdb.WriteBatch object
+ *
+ * @param env A pointer to the Java environment
+ * @param wb A pointer to rocksdb::WriteBatch object
+ *
+ * @return A reference to a Java org.rocksdb.WriteBatch object, or
+ * nullptr if an exception occurs
+ */
+ static jobject construct(JNIEnv* env, const WriteBatch* wb) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "(J)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ jobject jwb = env->NewObject(jclazz, mid, reinterpret_cast<jlong>(wb));
+ if (env->ExceptionCheck()) {
+ return nullptr;
+ }
+
+ return jwb;
+ }
+};
+
+// The portal class for org.rocksdb.WriteBatch.Handler
+class WriteBatchHandlerJni : public RocksDBNativeClass<
+ const rocksdb::WriteBatchHandlerJniCallback*,
+ WriteBatchHandlerJni> {
public:
/**
- * Get the Java Class org.rocksdb.WriteBatchWithIndex
+ * Get the Java Class org.rocksdb.WriteBatch.Handler
*
* @param env A pointer to the Java environment
*
*/
static jclass getJClass(JNIEnv* env) {
return RocksDBNativeClass::getJClass(env,
- "org/rocksdb/WriteBatchWithIndex");
+ "org/rocksdb/WriteBatch$Handler");
}
-};
-// The portal class for org.rocksdb.HistogramData
-class HistogramDataJni : public JavaClass {
- public:
/**
- * Get the Java Class org.rocksdb.HistogramData
+ * Get the Java Method: WriteBatch.Handler#put
*
* @param env A pointer to the Java environment
*
- * @return The Java Class or nullptr if one of the
- * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
- * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
*/
- static jclass getJClass(JNIEnv* env) {
- return JavaClass::getJClass(env, "org/rocksdb/HistogramData");
+ static jmethodID getPutCfMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "put", "(I[B[B)V");
+ assert(mid != nullptr);
+ return mid;
}
/**
- * Get the Java Method: HistogramData constructor
+ * Get the Java Method: WriteBatch.Handler#put
*
* @param env A pointer to the Java environment
*
* @return The Java Method ID or nullptr if the class or method id could not
* be retieved
*/
- static jmethodID getConstructorMethodId(JNIEnv* env) {
+ static jmethodID getPutMethodId(JNIEnv* env) {
jclass jclazz = getJClass(env);
if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- static jmethodID mid = env->GetMethodID(jclazz, "<init>", "(DDDDDDJJD)V");
+ static jmethodID mid = env->GetMethodID(jclazz, "put", "([B[B)V");
assert(mid != nullptr);
return mid;
}
-};
-// The portal class for org.rocksdb.BackupableDBOptions
-class BackupableDBOptionsJni : public RocksDBNativeClass<
- rocksdb::BackupableDBOptions*, BackupableDBOptionsJni> {
- public:
/**
- * Get the Java Class org.rocksdb.BackupableDBOptions
+ * Get the Java Method: WriteBatch.Handler#merge
*
* @param env A pointer to the Java environment
*
- * @return The Java Class or nullptr if one of the
- * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
- * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
*/
- static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env,
- "org/rocksdb/BackupableDBOptions");
+ static jmethodID getMergeCfMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "merge", "(I[B[B)V");
+ assert(mid != nullptr);
+ return mid;
}
-};
-// The portal class for org.rocksdb.BackupEngine
-class BackupEngineJni : public RocksDBNativeClass<
- rocksdb::BackupEngine*, BackupEngineJni> {
- public:
/**
- * Get the Java Class org.rocksdb.BackupableEngine
+ * Get the Java Method: WriteBatch.Handler#merge
*
* @param env A pointer to the Java environment
*
- * @return The Java Class or nullptr if one of the
- * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
- * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
*/
- static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env, "org/rocksdb/BackupEngine");
+ static jmethodID getMergeMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "merge", "([B[B)V");
+ assert(mid != nullptr);
+ return mid;
}
-};
-// The portal class for org.rocksdb.RocksIterator
-class IteratorJni : public RocksDBNativeClass<
- rocksdb::Iterator*, IteratorJni> {
- public:
/**
- * Get the Java Class org.rocksdb.RocksIterator
+ * Get the Java Method: WriteBatch.Handler#delete
*
* @param env A pointer to the Java environment
*
- * @return The Java Class or nullptr if one of the
- * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
- * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
*/
- static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env, "org/rocksdb/RocksIterator");
+ static jmethodID getDeleteCfMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "delete", "(I[B)V");
+ assert(mid != nullptr);
+ return mid;
}
-};
-// The portal class for org.rocksdb.Filter
-class FilterJni : public RocksDBNativeClass<
- std::shared_ptr<rocksdb::FilterPolicy>*, FilterJni> {
- public:
/**
- * Get the Java Class org.rocksdb.Filter
+ * Get the Java Method: WriteBatch.Handler#delete
*
* @param env A pointer to the Java environment
*
- * @return The Java Class or nullptr if one of the
- * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
- * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
*/
- static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env, "org/rocksdb/Filter");
+ static jmethodID getDeleteMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "delete", "([B)V");
+ assert(mid != nullptr);
+ return mid;
}
-};
-// The portal class for org.rocksdb.ColumnFamilyHandle
-class ColumnFamilyHandleJni : public RocksDBNativeClass<
- rocksdb::ColumnFamilyHandle*, ColumnFamilyHandleJni> {
- public:
/**
- * Get the Java Class org.rocksdb.ColumnFamilyHandle
+ * Get the Java Method: WriteBatch.Handler#singleDelete
*
* @param env A pointer to the Java environment
*
- * @return The Java Class or nullptr if one of the
- * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
- * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
*/
- static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env,
- "org/rocksdb/ColumnFamilyHandle");
+ static jmethodID getSingleDeleteCfMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "singleDelete", "(I[B)V");
+ assert(mid != nullptr);
+ return mid;
}
-};
-// The portal class for org.rocksdb.FlushOptions
-class FlushOptionsJni : public RocksDBNativeClass<
- rocksdb::FlushOptions*, FlushOptionsJni> {
- public:
/**
- * Get the Java Class org.rocksdb.FlushOptions
+ * Get the Java Method: WriteBatch.Handler#singleDelete
*
* @param env A pointer to the Java environment
*
- * @return The Java Class or nullptr if one of the
- * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
- * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
*/
- static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env, "org/rocksdb/FlushOptions");
+ static jmethodID getSingleDeleteMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "singleDelete", "([B)V");
+ assert(mid != nullptr);
+ return mid;
}
-};
-// The portal class for org.rocksdb.ComparatorOptions
-class ComparatorOptionsJni : public RocksDBNativeClass<
- rocksdb::ComparatorJniCallbackOptions*, ComparatorOptionsJni> {
- public:
/**
- * Get the Java Class org.rocksdb.ComparatorOptions
+ * Get the Java Method: WriteBatch.Handler#deleteRange
*
* @param env A pointer to the Java environment
*
- * @return The Java Class or nullptr if one of the
- * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
- * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
*/
- static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env, "org/rocksdb/ComparatorOptions");
+ static jmethodID getDeleteRangeCfMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "deleteRange", "(I[B[B)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#deleteRange
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getDeleteRangeMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "deleteRange", "([B[B)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.Handler#logData
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getLogDataMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "logData", "([B)V");
+ assert(mid != nullptr);
+ return mid;
}
-};
-// The portal class for org.rocksdb.AbstractCompactionFilterFactory
-class AbstractCompactionFilterFactoryJni : public RocksDBNativeClass<
- const rocksdb::CompactionFilterFactoryJniCallback*,
- AbstractCompactionFilterFactoryJni> {
- public:
/**
- * Get the Java Class org.rocksdb.AbstractCompactionFilterFactory
+ * Get the Java Method: WriteBatch.Handler#putBlobIndex
*
* @param env A pointer to the Java environment
*
- * @return The Java Class or nullptr if one of the
- * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
- * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
*/
- static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env,
- "org/rocksdb/AbstractCompactionFilterFactory");
+ static jmethodID getPutBlobIndexCfMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "putBlobIndex", "(I[B[B)V");
+ assert(mid != nullptr);
+ return mid;
}
/**
- * Get the Java Method: AbstractCompactionFilterFactory#name
+ * Get the Java Method: WriteBatch.Handler#markBeginPrepare
*
* @param env A pointer to the Java environment
*
* @return The Java Method ID or nullptr if the class or method id could not
* be retieved
*/
- static jmethodID getNameMethodId(JNIEnv* env) {
+ static jmethodID getMarkBeginPrepareMethodId(JNIEnv* env) {
jclass jclazz = getJClass(env);
if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- static jmethodID mid = env->GetMethodID(
- jclazz, "name", "()Ljava/lang/String;");
+ static jmethodID mid = env->GetMethodID(jclazz, "markBeginPrepare", "()V");
assert(mid != nullptr);
return mid;
}
/**
- * Get the Java Method: AbstractCompactionFilterFactory#createCompactionFilter
+ * Get the Java Method: WriteBatch.Handler#markEndPrepare
*
* @param env A pointer to the Java environment
*
* @return The Java Method ID or nullptr if the class or method id could not
* be retieved
*/
- static jmethodID getCreateCompactionFilterMethodId(JNIEnv* env) {
+ static jmethodID getMarkEndPrepareMethodId(JNIEnv* env) {
jclass jclazz = getJClass(env);
if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- static jmethodID mid = env->GetMethodID(jclazz,
- "createCompactionFilter",
- "(ZZ)J");
+ static jmethodID mid = env->GetMethodID(jclazz, "markEndPrepare", "([B)V");
assert(mid != nullptr);
return mid;
}
-};
-
-// The portal class for org.rocksdb.AbstractTransactionNotifier
-class AbstractTransactionNotifierJni : public RocksDBNativeClass<
- const rocksdb::TransactionNotifierJniCallback*,
- AbstractTransactionNotifierJni> {
- public:
- static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env,
- "org/rocksdb/AbstractTransactionNotifier");
- }
- // Get the java method `snapshotCreated`
- // of org.rocksdb.AbstractTransactionNotifier.
- static jmethodID getSnapshotCreatedMethodId(JNIEnv* env) {
+ /**
+ * Get the Java Method: WriteBatch.Handler#markNoop
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getMarkNoopMethodId(JNIEnv* env) {
jclass jclazz = getJClass(env);
if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- static jmethodID mid = env->GetMethodID(jclazz, "snapshotCreated", "(J)V");
+ static jmethodID mid = env->GetMethodID(jclazz, "markNoop", "(Z)V");
assert(mid != nullptr);
return mid;
}
-};
-// The portal class for org.rocksdb.AbstractComparator
-class AbstractComparatorJni : public RocksDBNativeClass<
- const rocksdb::BaseComparatorJniCallback*,
- AbstractComparatorJni> {
- public:
/**
- * Get the Java Class org.rocksdb.AbstractComparator
+ * Get the Java Method: WriteBatch.Handler#markRollback
*
* @param env A pointer to the Java environment
*
- * @return The Java Class or nullptr if one of the
- * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
- * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
*/
- static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env,
- "org/rocksdb/AbstractComparator");
+ static jmethodID getMarkRollbackMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "markRollback", "([B)V");
+ assert(mid != nullptr);
+ return mid;
}
/**
- * Get the Java Method: Comparator#name
+ * Get the Java Method: WriteBatch.Handler#markCommit
*
* @param env A pointer to the Java environment
*
* @return The Java Method ID or nullptr if the class or method id could not
* be retieved
*/
- static jmethodID getNameMethodId(JNIEnv* env) {
+ static jmethodID getMarkCommitMethodId(JNIEnv* env) {
jclass jclazz = getJClass(env);
if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- static jmethodID mid =
- env->GetMethodID(jclazz, "name", "()Ljava/lang/String;");
+ static jmethodID mid = env->GetMethodID(jclazz, "markCommit", "([B)V");
assert(mid != nullptr);
return mid;
}
/**
- * Get the Java Method: Comparator#compare
+ * Get the Java Method: WriteBatch.Handler#shouldContinue
*
* @param env A pointer to the Java environment
*
* @return The Java Method ID or nullptr if the class or method id could not
* be retieved
*/
- static jmethodID getCompareMethodId(JNIEnv* env) {
+ static jmethodID getContinueMethodId(JNIEnv* env) {
jclass jclazz = getJClass(env);
if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- static jmethodID mid =
- env->GetMethodID(jclazz, "compare",
- "(Lorg/rocksdb/AbstractSlice;Lorg/rocksdb/AbstractSlice;)I");
+ static jmethodID mid = env->GetMethodID(jclazz, "shouldContinue", "()Z");
assert(mid != nullptr);
return mid;
}
+};
+class WriteBatchSavePointJni : public JavaClass {
+ public:
/**
- * Get the Java Method: Comparator#findShortestSeparator
+ * Get the Java Class org.rocksdb.WriteBatch.SavePoint
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/WriteBatch$SavePoint");
+ }
+
+ /**
+ * Get the Java Method: WriteBatch.SavePoint constructor
*
* @param env A pointer to the Java environment
*
* @return The Java Method ID or nullptr if the class or method id could not
* be retieved
*/
- static jmethodID getFindShortestSeparatorMethodId(JNIEnv* env) {
+ static jmethodID getConstructorMethodId(JNIEnv* env) {
jclass jclazz = getJClass(env);
if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- static jmethodID mid =
- env->GetMethodID(jclazz, "findShortestSeparator",
- "(Ljava/lang/String;Lorg/rocksdb/AbstractSlice;)Ljava/lang/String;");
+ static jmethodID mid = env->GetMethodID(jclazz, "<init>", "(JJJ)V");
assert(mid != nullptr);
return mid;
}
/**
- * Get the Java Method: Comparator#findShortSuccessor
+ * Create a new Java org.rocksdb.WriteBatch.SavePoint object
*
* @param env A pointer to the Java environment
+ * @param savePoint A pointer to rocksdb::WriteBatch::SavePoint object
*
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
+ * @return A reference to a Java org.rocksdb.WriteBatch.SavePoint object, or
+ * nullptr if an exception occurs
*/
- static jmethodID getFindShortSuccessorMethodId(JNIEnv* env) {
+ static jobject construct(JNIEnv* env, const SavePoint &save_point) {
jclass jclazz = getJClass(env);
if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- static jmethodID mid =
- env->GetMethodID(jclazz, "findShortSuccessor",
- "(Ljava/lang/String;)Ljava/lang/String;");
- assert(mid != nullptr);
- return mid;
+ jmethodID mid = getConstructorMethodId(env);
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ jobject jsave_point = env->NewObject(jclazz, mid,
+ static_cast<jlong>(save_point.size),
+ static_cast<jlong>(save_point.count),
+ static_cast<jlong>(save_point.content_flags));
+ if (env->ExceptionCheck()) {
+ return nullptr;
+ }
+
+ return jsave_point;
}
};
-// The portal class for org.rocksdb.AbstractSlice
-class AbstractSliceJni : public NativeRocksMutableObject<
- const rocksdb::Slice*, AbstractSliceJni> {
+// The portal class for org.rocksdb.WriteBatchWithIndex
+class WriteBatchWithIndexJni : public RocksDBNativeClass<
+ rocksdb::WriteBatchWithIndex*, WriteBatchWithIndexJni> {
public:
/**
- * Get the Java Class org.rocksdb.AbstractSlice
+ * Get the Java Class org.rocksdb.WriteBatchWithIndex
*
* @param env A pointer to the Java environment
*
* OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env, "org/rocksdb/AbstractSlice");
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/WriteBatchWithIndex");
}
};
-// The portal class for org.rocksdb.Slice
-class SliceJni : public NativeRocksMutableObject<
- const rocksdb::Slice*, AbstractSliceJni> {
+// The portal class for org.rocksdb.HistogramData
+class HistogramDataJni : public JavaClass {
public:
/**
- * Get the Java Class org.rocksdb.Slice
+ * Get the Java Class org.rocksdb.HistogramData
*
* @param env A pointer to the Java environment
*
* OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env, "org/rocksdb/Slice");
+ return JavaClass::getJClass(env, "org/rocksdb/HistogramData");
}
/**
- * Constructs a Slice object
+ * Get the Java Method: HistogramData constructor
*
* @param env A pointer to the Java environment
*
- * @return A reference to a Java Slice object, or a nullptr if an
- * exception occurs
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
*/
- static jobject construct0(JNIEnv* env) {
+ static jmethodID getConstructorMethodId(JNIEnv* env) {
jclass jclazz = getJClass(env);
if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- static jmethodID mid = env->GetMethodID(jclazz, "<init>", "()V");
- if(mid == nullptr) {
- // exception occurred accessing method
- return nullptr;
- }
+ static jmethodID mid = env->GetMethodID(jclazz, "<init>", "(DDDDDDJJD)V");
+ assert(mid != nullptr);
+ return mid;
+ }
+};
- jobject jslice = env->NewObject(jclazz, mid);
- if(env->ExceptionCheck()) {
- return nullptr;
- }
+// The portal class for org.rocksdb.BackupableDBOptions
+class BackupableDBOptionsJni : public RocksDBNativeClass<
+ rocksdb::BackupableDBOptions*, BackupableDBOptionsJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.BackupableDBOptions
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/BackupableDBOptions");
+ }
+};
- return jslice;
+// The portal class for org.rocksdb.BackupEngine
+class BackupEngineJni : public RocksDBNativeClass<
+ rocksdb::BackupEngine*, BackupEngineJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.BackupEngine
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/BackupEngine");
}
};
-// The portal class for org.rocksdb.DirectSlice
-class DirectSliceJni : public NativeRocksMutableObject<
- const rocksdb::Slice*, AbstractSliceJni> {
+// The portal class for org.rocksdb.RocksIterator
+class IteratorJni : public RocksDBNativeClass<
+ rocksdb::Iterator*, IteratorJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.RocksIterator
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/RocksIterator");
+ }
+};
+
+// The portal class for org.rocksdb.Filter
+class FilterJni : public RocksDBNativeClass<
+ std::shared_ptr<rocksdb::FilterPolicy>*, FilterJni> {
public:
/**
- * Get the Java Class org.rocksdb.DirectSlice
+ * Get the Java Class org.rocksdb.Filter
*
* @param env A pointer to the Java environment
*
* OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
static jclass getJClass(JNIEnv* env) {
- return RocksDBNativeClass::getJClass(env, "org/rocksdb/DirectSlice");
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/Filter");
}
+};
+// The portal class for org.rocksdb.ColumnFamilyHandle
+class ColumnFamilyHandleJni : public RocksDBNativeClass<
+ rocksdb::ColumnFamilyHandle*, ColumnFamilyHandleJni> {
+ public:
/**
- * Constructs a DirectSlice object
+ * Get the Java Class org.rocksdb.ColumnFamilyHandle
*
* @param env A pointer to the Java environment
*
- * @return A reference to a Java DirectSlice object, or a nullptr if an
- * exception occurs
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
- static jobject construct0(JNIEnv* env) {
- jclass jclazz = getJClass(env);
- if(jclazz == nullptr) {
- // exception occurred accessing class
- return nullptr;
- }
-
- static jmethodID mid = env->GetMethodID(jclazz, "<init>", "()V");
- if(mid == nullptr) {
- // exception occurred accessing method
- return nullptr;
- }
-
- jobject jdirect_slice = env->NewObject(jclazz, mid);
- if(env->ExceptionCheck()) {
- return nullptr;
- }
-
- return jdirect_slice;
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/ColumnFamilyHandle");
}
};
-// The portal class for java.util.List
-class ListJni : public JavaClass {
+// The portal class for org.rocksdb.FlushOptions
+class FlushOptionsJni : public RocksDBNativeClass<
+ rocksdb::FlushOptions*, FlushOptionsJni> {
public:
/**
- * Get the Java Class java.util.List
+ * Get the Java Class org.rocksdb.FlushOptions
*
* @param env A pointer to the Java environment
*
* ClassFormatError, ClassCircularityError, NoClassDefFoundError,
* OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
- static jclass getListClass(JNIEnv* env) {
- return JavaClass::getJClass(env, "java/util/List");
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/FlushOptions");
}
+};
+// The portal class for org.rocksdb.ComparatorOptions
+class ComparatorOptionsJni : public RocksDBNativeClass<
+ rocksdb::ComparatorJniCallbackOptions*, ComparatorOptionsJni> {
+ public:
/**
- * Get the Java Class java.util.ArrayList
+ * Get the Java Class org.rocksdb.ComparatorOptions
*
* @param env A pointer to the Java environment
*
* ClassFormatError, ClassCircularityError, NoClassDefFoundError,
* OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
- static jclass getArrayListClass(JNIEnv* env) {
- return JavaClass::getJClass(env, "java/util/ArrayList");
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/ComparatorOptions");
}
+};
+// The portal class for org.rocksdb.AbstractCompactionFilterFactory
+class AbstractCompactionFilterFactoryJni : public RocksDBNativeClass<
+ const rocksdb::CompactionFilterFactoryJniCallback*,
+ AbstractCompactionFilterFactoryJni> {
+ public:
/**
- * Get the Java Class java.util.Iterator
+ * Get the Java Class org.rocksdb.AbstractCompactionFilterFactory
*
* @param env A pointer to the Java environment
*
* ClassFormatError, ClassCircularityError, NoClassDefFoundError,
* OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
- static jclass getIteratorClass(JNIEnv* env) {
- return JavaClass::getJClass(env, "java/util/Iterator");
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/AbstractCompactionFilterFactory");
}
/**
- * Get the Java Method: List#iterator
+ * Get the Java Method: AbstractCompactionFilterFactory#name
*
* @param env A pointer to the Java environment
*
* @return The Java Method ID or nullptr if the class or method id could not
* be retieved
*/
- static jmethodID getIteratorMethod(JNIEnv* env) {
- jclass jlist_clazz = getListClass(env);
- if(jlist_clazz == nullptr) {
+ static jmethodID getNameMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- static jmethodID mid =
- env->GetMethodID(jlist_clazz, "iterator", "()Ljava/util/Iterator;");
+ static jmethodID mid = env->GetMethodID(
+ jclazz, "name", "()Ljava/lang/String;");
assert(mid != nullptr);
return mid;
}
/**
- * Get the Java Method: Iterator#hasNext
+ * Get the Java Method: AbstractCompactionFilterFactory#createCompactionFilter
*
* @param env A pointer to the Java environment
*
* @return The Java Method ID or nullptr if the class or method id could not
* be retieved
*/
- static jmethodID getHasNextMethod(JNIEnv* env) {
- jclass jiterator_clazz = getIteratorClass(env);
- if(jiterator_clazz == nullptr) {
+ static jmethodID getCreateCompactionFilterMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- static jmethodID mid = env->GetMethodID(jiterator_clazz, "hasNext", "()Z");
+ static jmethodID mid = env->GetMethodID(jclazz,
+ "createCompactionFilter",
+ "(ZZ)J");
+ assert(mid != nullptr);
+ return mid;
+ }
+};
+
+// The portal class for org.rocksdb.AbstractTransactionNotifier
+class AbstractTransactionNotifierJni : public RocksDBNativeClass<
+ const rocksdb::TransactionNotifierJniCallback*,
+ AbstractTransactionNotifierJni> {
+ public:
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/AbstractTransactionNotifier");
+ }
+
+ // Get the java method `snapshotCreated`
+ // of org.rocksdb.AbstractTransactionNotifier.
+ static jmethodID getSnapshotCreatedMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid = env->GetMethodID(jclazz, "snapshotCreated", "(J)V");
assert(mid != nullptr);
return mid;
}
+};
+
+// The portal class for org.rocksdb.AbstractComparator
+class AbstractComparatorJni : public RocksDBNativeClass<
+ const rocksdb::BaseComparatorJniCallback*,
+ AbstractComparatorJni> {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.AbstractComparator
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/AbstractComparator");
+ }
/**
- * Get the Java Method: Iterator#next
+ * Get the Java Method: Comparator#name
*
* @param env A pointer to the Java environment
*
* @return The Java Method ID or nullptr if the class or method id could not
* be retieved
*/
- static jmethodID getNextMethod(JNIEnv* env) {
- jclass jiterator_clazz = getIteratorClass(env);
- if(jiterator_clazz == nullptr) {
+ static jmethodID getNameMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
static jmethodID mid =
- env->GetMethodID(jiterator_clazz, "next", "()Ljava/lang/Object;");
+ env->GetMethodID(jclazz, "name", "()Ljava/lang/String;");
assert(mid != nullptr);
return mid;
}
/**
- * Get the Java Method: ArrayList constructor
+ * Get the Java Method: Comparator#compare
*
* @param env A pointer to the Java environment
*
* @return The Java Method ID or nullptr if the class or method id could not
* be retieved
*/
- static jmethodID getArrayListConstructorMethodId(JNIEnv* env) {
- jclass jarray_list_clazz = getArrayListClass(env);
- if(jarray_list_clazz == nullptr) {
+ static jmethodID getCompareMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
+
static jmethodID mid =
- env->GetMethodID(jarray_list_clazz, "<init>", "(I)V");
+ env->GetMethodID(jclazz, "compare",
+ "(Lorg/rocksdb/AbstractSlice;Lorg/rocksdb/AbstractSlice;)I");
assert(mid != nullptr);
return mid;
}
/**
- * Get the Java Method: List#add
+ * Get the Java Method: Comparator#findShortestSeparator
*
* @param env A pointer to the Java environment
*
* @return The Java Method ID or nullptr if the class or method id could not
* be retieved
*/
- static jmethodID getListAddMethodId(JNIEnv* env) {
- jclass jlist_clazz = getListClass(env);
- if(jlist_clazz == nullptr) {
+ static jmethodID getFindShortestSeparatorMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
static jmethodID mid =
- env->GetMethodID(jlist_clazz, "add", "(Ljava/lang/Object;)Z");
+ env->GetMethodID(jclazz, "findShortestSeparator",
+ "(Ljava/lang/String;Lorg/rocksdb/AbstractSlice;)Ljava/lang/String;");
assert(mid != nullptr);
return mid;
}
-};
-// The portal class for java.lang.Byte
-class ByteJni : public JavaClass {
- public:
/**
- * Get the Java Class java.lang.Byte
+ * Get the Java Method: Comparator#findShortSuccessor
*
* @param env A pointer to the Java environment
*
- * @return The Java Class or nullptr if one of the
- * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
- * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
*/
- static jclass getJClass(JNIEnv* env) {
- return JavaClass::getJClass(env, "java/lang/Byte");
+ static jmethodID getFindShortSuccessorMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ static jmethodID mid =
+ env->GetMethodID(jclazz, "findShortSuccessor",
+ "(Ljava/lang/String;)Ljava/lang/String;");
+ assert(mid != nullptr);
+ return mid;
}
+};
+// The portal class for org.rocksdb.AbstractSlice
+class AbstractSliceJni : public NativeRocksMutableObject<
+ const rocksdb::Slice*, AbstractSliceJni> {
+ public:
/**
- * Get the Java Class byte[]
+ * Get the Java Class org.rocksdb.AbstractSlice
*
* @param env A pointer to the Java environment
*
* ClassFormatError, ClassCircularityError, NoClassDefFoundError,
* OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
- static jclass getArrayJClass(JNIEnv* env) {
- return JavaClass::getJClass(env, "[B");
- }
-
- /**
- * Creates a new 2-dimensional Java Byte Array byte[][]
- *
- * @param env A pointer to the Java environment
- * @param len The size of the first dimension
- *
- * @return A reference to the Java byte[][] or nullptr if an exception occurs
- */
- static jobjectArray new2dByteArray(JNIEnv* env, const jsize len) {
- jclass clazz = getArrayJClass(env);
- if(clazz == nullptr) {
- // exception occurred accessing class
- return nullptr;
- }
-
- return env->NewObjectArray(len, clazz, nullptr);
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/AbstractSlice");
}
+};
+// The portal class for org.rocksdb.Slice
+class SliceJni : public NativeRocksMutableObject<
+ const rocksdb::Slice*, AbstractSliceJni> {
+ public:
/**
- * Get the Java Method: Byte#byteValue
+ * Get the Java Class org.rocksdb.Slice
*
* @param env A pointer to the Java environment
*
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retrieved
- */
- static jmethodID getByteValueMethod(JNIEnv* env) {
- jclass clazz = getJClass(env);
- if(clazz == nullptr) {
- // exception occurred accessing class
- return nullptr;
- }
-
- static jmethodID mid = env->GetMethodID(clazz, "byteValue", "()B");
- assert(mid != nullptr);
- return mid;
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/Slice");
}
/**
- * Calls the Java Method: Byte#valueOf, returning a constructed Byte jobject
+ * Constructs a Slice object
*
* @param env A pointer to the Java environment
*
- * @return A constructing Byte object or nullptr if the class or method id could not
- * be retrieved, or an exception occurred
+ * @return A reference to a Java Slice object, or a nullptr if an
+ * exception occurs
*/
- static jobject valueOf(JNIEnv* env, jbyte jprimitive_byte) {
- jclass clazz = getJClass(env);
- if (clazz == nullptr) {
+ static jobject construct0(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- static jmethodID mid =
- env->GetStaticMethodID(clazz, "valueOf", "(B)Ljava/lang/Byte;");
- if (mid == nullptr) {
- // exception thrown: NoSuchMethodException or OutOfMemoryError
+ static jmethodID mid = env->GetMethodID(jclazz, "<init>", "()V");
+ if(mid == nullptr) {
+ // exception occurred accessing method
return nullptr;
}
- const jobject jbyte_obj =
- env->CallStaticObjectMethod(clazz, mid, jprimitive_byte);
- if (env->ExceptionCheck()) {
- // exception occurred
+ jobject jslice = env->NewObject(jclazz, mid);
+ if(env->ExceptionCheck()) {
return nullptr;
}
- return jbyte_obj;
+ return jslice;
}
-
};
-// The portal class for java.lang.StringBuilder
-class StringBuilderJni : public JavaClass {
- public:
+// The portal class for org.rocksdb.DirectSlice
+class DirectSliceJni : public NativeRocksMutableObject<
+ const rocksdb::Slice*, AbstractSliceJni> {
+ public:
/**
- * Get the Java Class java.lang.StringBuilder
+ * Get the Java Class org.rocksdb.DirectSlice
*
* @param env A pointer to the Java environment
*
* OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
static jclass getJClass(JNIEnv* env) {
- return JavaClass::getJClass(env, "java/lang/StringBuilder");
+ return RocksDBNativeClass::getJClass(env, "org/rocksdb/DirectSlice");
}
/**
- * Get the Java Method: StringBuilder#append
+ * Constructs a DirectSlice object
*
* @param env A pointer to the Java environment
*
- * @return The Java Method ID or nullptr if the class or method id could not
- * be retieved
+ * @return A reference to a Java DirectSlice object, or a nullptr if an
+ * exception occurs
*/
- static jmethodID getListAddMethodId(JNIEnv* env) {
+ static jobject construct0(JNIEnv* env) {
jclass jclazz = getJClass(env);
if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- static jmethodID mid =
- env->GetMethodID(jclazz, "append",
- "(Ljava/lang/String;)Ljava/lang/StringBuilder;");
- assert(mid != nullptr);
- return mid;
- }
-
- /**
- * Appends a C-style string to a StringBuilder
- *
- * @param env A pointer to the Java environment
- * @param jstring_builder Reference to a java.lang.StringBuilder
- * @param c_str A C-style string to append to the StringBuilder
- *
- * @return A reference to the updated StringBuilder, or a nullptr if
- * an exception occurs
- */
- static jobject append(JNIEnv* env, jobject jstring_builder,
- const char* c_str) {
- jmethodID mid = getListAddMethodId(env);
+ static jmethodID mid = env->GetMethodID(jclazz, "<init>", "()V");
if(mid == nullptr) {
- // exception occurred accessing class or method
- return nullptr;
- }
-
- jstring new_value_str = env->NewStringUTF(c_str);
- if(new_value_str == nullptr) {
- // exception thrown: OutOfMemoryError
+ // exception occurred accessing method
return nullptr;
}
- jobject jresult_string_builder =
- env->CallObjectMethod(jstring_builder, mid, new_value_str);
+ jobject jdirect_slice = env->NewObject(jclazz, mid);
if(env->ExceptionCheck()) {
- // exception occurred
- env->DeleteLocalRef(new_value_str);
return nullptr;
}
- return jresult_string_builder;
+ return jdirect_slice;
}
};
}
};
-// various utility functions for working with RocksDB and JNI
-class JniUtil {
+class AbstractTableFilterJni : public RocksDBNativeClass<const rocksdb::TableFilterJniCallback*, AbstractTableFilterJni> {
public:
- /**
- * Obtains a reference to the JNIEnv from
- * the JVM
- *
- * If the current thread is not attached to the JavaVM
- * then it will be attached so as to retrieve the JNIEnv
- *
- * If a thread is attached, it must later be manually
- * released by calling JavaVM::DetachCurrentThread.
- * This can be handled by always matching calls to this
- * function with calls to {@link JniUtil::releaseJniEnv(JavaVM*, jboolean)}
- *
- * @param jvm (IN) A pointer to the JavaVM instance
- * @param attached (OUT) A pointer to a boolean which
- * will be set to JNI_TRUE if we had to attach the thread
- *
- * @return A pointer to the JNIEnv or nullptr if a fatal error
- * occurs and the JNIEnv cannot be retrieved
- */
- static JNIEnv* getJniEnv(JavaVM* jvm, jboolean* attached) {
- assert(jvm != nullptr);
+ /**
+ * Get the Java Method: TableFilter#filter(TableProperties)
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getFilterMethod(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
- JNIEnv *env;
- const jint env_rs = jvm->GetEnv(reinterpret_cast<void**>(&env),
- JNI_VERSION_1_2);
+ static jmethodID mid =
+ env->GetMethodID(jclazz, "filter", "(Lorg/rocksdb/TableProperties;)Z");
+ assert(mid != nullptr);
+ return mid;
+ }
- if(env_rs == JNI_OK) {
- // current thread is already attached, return the JNIEnv
- *attached = JNI_FALSE;
- return env;
- } else if(env_rs == JNI_EDETACHED) {
- // current thread is not attached, attempt to attach
- const jint rs_attach = jvm->AttachCurrentThread(reinterpret_cast<void**>(&env), NULL);
- if(rs_attach == JNI_OK) {
- *attached = JNI_TRUE;
- return env;
- } else {
- // error, could not attach the thread
- std::cerr << "JniUtil::getJinEnv - Fatal: could not attach current thread to JVM!" << std::endl;
- return nullptr;
- }
- } else if(env_rs == JNI_EVERSION) {
- // error, JDK does not support JNI_VERSION_1_2+
- std::cerr << "JniUtil::getJinEnv - Fatal: JDK does not support JNI_VERSION_1_2" << std::endl;
- return nullptr;
- } else {
- std::cerr << "JniUtil::getJinEnv - Fatal: Unknown error: env_rs=" << env_rs << std::endl;
- return nullptr;
- }
+ private:
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/TableFilter");
+ }
+};
+
+class TablePropertiesJni : public JavaClass {
+ public:
+ /**
+ * Create a new Java org.rocksdb.TableProperties object.
+ *
+ * @param env A pointer to the Java environment
+ * @param table_properties A Cpp table properties object
+ *
+ * @return A reference to a Java org.rocksdb.TableProperties object, or
+ * nullptr if an exception occurs
+ */
+ static jobject fromCppTableProperties(JNIEnv* env, const rocksdb::TableProperties& table_properties) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "(JJJJJJJJJJJJJJJJJJJ[BLjava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/util/Map;Ljava/util/Map;Ljava/util/Map;)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ jbyteArray jcolumn_family_name = rocksdb::JniUtil::copyBytes(env, table_properties.column_family_name);
+ if (jcolumn_family_name == nullptr) {
+ // exception occurred creating java string
+ return nullptr;
+ }
+
+ jstring jfilter_policy_name = rocksdb::JniUtil::toJavaString(env, &table_properties.filter_policy_name, true);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java string
+ env->DeleteLocalRef(jcolumn_family_name);
+ return nullptr;
+ }
+
+ jstring jcomparator_name = rocksdb::JniUtil::toJavaString(env, &table_properties.comparator_name, true);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java string
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfilter_policy_name);
+ return nullptr;
+ }
+
+ jstring jmerge_operator_name = rocksdb::JniUtil::toJavaString(env, &table_properties.merge_operator_name, true);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java string
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfilter_policy_name);
+ env->DeleteLocalRef(jcomparator_name);
+ return nullptr;
+ }
+
+ jstring jprefix_extractor_name = rocksdb::JniUtil::toJavaString(env, &table_properties.prefix_extractor_name, true);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java string
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfilter_policy_name);
+ env->DeleteLocalRef(jcomparator_name);
+ env->DeleteLocalRef(jmerge_operator_name);
+ return nullptr;
+ }
+
+ jstring jproperty_collectors_names = rocksdb::JniUtil::toJavaString(env, &table_properties.property_collectors_names, true);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java string
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfilter_policy_name);
+ env->DeleteLocalRef(jcomparator_name);
+ env->DeleteLocalRef(jmerge_operator_name);
+ env->DeleteLocalRef(jprefix_extractor_name);
+ return nullptr;
+ }
+
+ jstring jcompression_name = rocksdb::JniUtil::toJavaString(env, &table_properties.compression_name, true);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java string
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfilter_policy_name);
+ env->DeleteLocalRef(jcomparator_name);
+ env->DeleteLocalRef(jmerge_operator_name);
+ env->DeleteLocalRef(jprefix_extractor_name);
+ env->DeleteLocalRef(jproperty_collectors_names);
+ return nullptr;
+ }
+
+ // Map<String, String>
+ jobject juser_collected_properties = rocksdb::HashMapJni::fromCppMap(env, &table_properties.user_collected_properties);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java map
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfilter_policy_name);
+ env->DeleteLocalRef(jcomparator_name);
+ env->DeleteLocalRef(jmerge_operator_name);
+ env->DeleteLocalRef(jprefix_extractor_name);
+ env->DeleteLocalRef(jproperty_collectors_names);
+ env->DeleteLocalRef(jcompression_name);
+ return nullptr;
+ }
+
+ // Map<String, String>
+ jobject jreadable_properties = rocksdb::HashMapJni::fromCppMap(env, &table_properties.readable_properties);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java map
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfilter_policy_name);
+ env->DeleteLocalRef(jcomparator_name);
+ env->DeleteLocalRef(jmerge_operator_name);
+ env->DeleteLocalRef(jprefix_extractor_name);
+ env->DeleteLocalRef(jproperty_collectors_names);
+ env->DeleteLocalRef(jcompression_name);
+ env->DeleteLocalRef(juser_collected_properties);
+ return nullptr;
+ }
+
+ // Map<String, Long>
+ jobject jproperties_offsets = rocksdb::HashMapJni::fromCppMap(env, &table_properties.properties_offsets);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java map
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfilter_policy_name);
+ env->DeleteLocalRef(jcomparator_name);
+ env->DeleteLocalRef(jmerge_operator_name);
+ env->DeleteLocalRef(jprefix_extractor_name);
+ env->DeleteLocalRef(jproperty_collectors_names);
+ env->DeleteLocalRef(jcompression_name);
+ env->DeleteLocalRef(juser_collected_properties);
+ env->DeleteLocalRef(jreadable_properties);
+ return nullptr;
+ }
+
+ jobject jtable_properties = env->NewObject(jclazz, mid,
+ static_cast<jlong>(table_properties.data_size),
+ static_cast<jlong>(table_properties.index_size),
+ static_cast<jlong>(table_properties.index_partitions),
+ static_cast<jlong>(table_properties.top_level_index_size),
+ static_cast<jlong>(table_properties.index_key_is_user_key),
+ static_cast<jlong>(table_properties.index_value_is_delta_encoded),
+ static_cast<jlong>(table_properties.filter_size),
+ static_cast<jlong>(table_properties.raw_key_size),
+ static_cast<jlong>(table_properties.raw_value_size),
+ static_cast<jlong>(table_properties.num_data_blocks),
+ static_cast<jlong>(table_properties.num_entries),
+ static_cast<jlong>(table_properties.num_deletions),
+ static_cast<jlong>(table_properties.num_merge_operands),
+ static_cast<jlong>(table_properties.num_range_deletions),
+ static_cast<jlong>(table_properties.format_version),
+ static_cast<jlong>(table_properties.fixed_key_len),
+ static_cast<jlong>(table_properties.column_family_id),
+ static_cast<jlong>(table_properties.creation_time),
+ static_cast<jlong>(table_properties.oldest_key_time),
+ jcolumn_family_name,
+ jfilter_policy_name,
+ jcomparator_name,
+ jmerge_operator_name,
+ jprefix_extractor_name,
+ jproperty_collectors_names,
+ jcompression_name,
+ juser_collected_properties,
+ jreadable_properties,
+ jproperties_offsets
+ );
+
+ if (env->ExceptionCheck()) {
+ return nullptr;
+ }
+
+ return jtable_properties;
+ }
+
+ private:
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/TableProperties");
+ }
+};
+
+class ColumnFamilyDescriptorJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.ColumnFamilyDescriptor
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/ColumnFamilyDescriptor");
+ }
+
+ /**
+ * Create a new Java org.rocksdb.ColumnFamilyDescriptor object with the same
+ * properties as the provided C++ rocksdb::ColumnFamilyDescriptor object
+ *
+ * @param env A pointer to the Java environment
+ * @param cfd A pointer to rocksdb::ColumnFamilyDescriptor object
+ *
+ * @return A reference to a Java org.rocksdb.ColumnFamilyDescriptor object, or
+ * nullptr if an exception occurs
+ */
+ static jobject construct(JNIEnv* env, ColumnFamilyDescriptor* cfd) {
+ jbyteArray jcf_name = JniUtil::copyBytes(env, cfd->name);
+ jobject cfopts = ColumnFamilyOptionsJni::construct(env, &(cfd->options));
+
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = env->GetMethodID(jclazz, "<init>",
+ "([BLorg/rocksdb/ColumnFamilyOptions;)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ env->DeleteLocalRef(jcf_name);
+ return nullptr;
}
- /**
- * Counterpart to {@link JniUtil::getJniEnv(JavaVM*, jboolean*)}
- *
- * Detachess the current thread from the JVM if it was previously
- * attached
- *
- * @param jvm (IN) A pointer to the JavaVM instance
- * @param attached (IN) JNI_TRUE if we previously had to attach the thread
- * to the JavaVM to get the JNIEnv
- */
- static void releaseJniEnv(JavaVM* jvm, jboolean& attached) {
- assert(jvm != nullptr);
- if(attached == JNI_TRUE) {
- const jint rs_detach = jvm->DetachCurrentThread();
- assert(rs_detach == JNI_OK);
- if(rs_detach != JNI_OK) {
- std::cerr << "JniUtil::getJinEnv - Warn: Unable to detach current thread from JVM!" << std::endl;
- }
- }
+ jobject jcfd = env->NewObject(jclazz, mid, jcf_name, cfopts);
+ if (env->ExceptionCheck()) {
+ env->DeleteLocalRef(jcf_name);
+ return nullptr;
}
- /**
- * Copies a Java String[] to a C++ std::vector<std::string>
- *
- * @param env (IN) A pointer to the java environment
- * @param jss (IN) The Java String array to copy
- * @param has_exception (OUT) will be set to JNI_TRUE
- * if an OutOfMemoryError or ArrayIndexOutOfBoundsException
- * exception occurs
- *
- * @return A std::vector<std::string> containing copies of the Java strings
- */
- static std::vector<std::string> copyStrings(JNIEnv* env,
- jobjectArray jss, jboolean* has_exception) {
- return rocksdb::JniUtil::copyStrings(env, jss,
- env->GetArrayLength(jss), has_exception);
+ return jcfd;
+ }
+
+ /**
+ * Get the Java Method: ColumnFamilyDescriptor#columnFamilyName
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getColumnFamilyNameMethod(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
}
- /**
- * Copies a Java String[] to a C++ std::vector<std::string>
- *
- * @param env (IN) A pointer to the java environment
- * @param jss (IN) The Java String array to copy
- * @param jss_len (IN) The length of the Java String array to copy
- * @param has_exception (OUT) will be set to JNI_TRUE
- * if an OutOfMemoryError or ArrayIndexOutOfBoundsException
- * exception occurs
- *
- * @return A std::vector<std::string> containing copies of the Java strings
- */
- static std::vector<std::string> copyStrings(JNIEnv* env,
- jobjectArray jss, const jsize jss_len, jboolean* has_exception) {
- std::vector<std::string> strs;
- for (jsize i = 0; i < jss_len; i++) {
- jobject js = env->GetObjectArrayElement(jss, i);
- if(env->ExceptionCheck()) {
- // exception thrown: ArrayIndexOutOfBoundsException
- *has_exception = JNI_TRUE;
- return strs;
- }
+ static jmethodID mid = env->GetMethodID(jclazz, "columnFamilyName", "()[B");
+ assert(mid != nullptr);
+ return mid;
+ }
- jstring jstr = static_cast<jstring>(js);
- const char* str = env->GetStringUTFChars(jstr, nullptr);
- if(str == nullptr) {
- // exception thrown: OutOfMemoryError
- env->DeleteLocalRef(js);
- *has_exception = JNI_TRUE;
- return strs;
- }
+ /**
+ * Get the Java Method: ColumnFamilyDescriptor#columnFamilyOptions
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
+ */
+ static jmethodID getColumnFamilyOptionsMethod(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
- strs.push_back(std::string(str));
+ static jmethodID mid = env->GetMethodID(
+ jclazz, "columnFamilyOptions", "()Lorg/rocksdb/ColumnFamilyOptions;");
+ assert(mid != nullptr);
+ return mid;
+ }
+};
- env->ReleaseStringUTFChars(jstr, str);
- env->DeleteLocalRef(js);
- }
+// The portal class for org.rocksdb.IndexType
+class IndexTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.IndexType for the provided
+ // C++ rocksdb::IndexType enum
+ static jbyte toJavaIndexType(
+ const rocksdb::BlockBasedTableOptions::IndexType& index_type) {
+ switch(index_type) {
+ case rocksdb::BlockBasedTableOptions::IndexType::kBinarySearch:
+ return 0x0;
+ case rocksdb::BlockBasedTableOptions::IndexType::kHashSearch:
+ return 0x1;
+ case rocksdb::BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch:
+ return 0x2;
+ default:
+ return 0x7F; // undefined
+ }
+ }
- *has_exception = JNI_FALSE;
- return strs;
- }
+ // Returns the equivalent C++ rocksdb::IndexType enum for the
+ // provided Java org.rocksdb.IndexType
+ static rocksdb::BlockBasedTableOptions::IndexType toCppIndexType(
+ jbyte jindex_type) {
+ switch(jindex_type) {
+ case 0x0:
+ return rocksdb::BlockBasedTableOptions::IndexType::kBinarySearch;
+ case 0x1:
+ return rocksdb::BlockBasedTableOptions::IndexType::kHashSearch;
+ case 0x2:
+ return rocksdb::BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
+ default:
+ // undefined/default
+ return rocksdb::BlockBasedTableOptions::IndexType::kBinarySearch;
+ }
+ }
+};
- /**
- * Copies a jstring to a C-style null-terminated byte string
- * and releases the original jstring
- *
- * The jstring is copied as UTF-8
- *
- * If an exception occurs, then JNIEnv::ExceptionCheck()
- * will have been called
- *
- * @param env (IN) A pointer to the java environment
- * @param js (IN) The java string to copy
- * @param has_exception (OUT) will be set to JNI_TRUE
- * if an OutOfMemoryError exception occurs
- *
- * @return A pointer to the copied string, or a
- * nullptr if has_exception == JNI_TRUE
- */
- static std::unique_ptr<char[]> copyString(JNIEnv* env, jstring js,
- jboolean* has_exception) {
- const char *utf = env->GetStringUTFChars(js, nullptr);
- if(utf == nullptr) {
- // exception thrown: OutOfMemoryError
- env->ExceptionCheck();
- *has_exception = JNI_TRUE;
- return nullptr;
- } else if(env->ExceptionCheck()) {
- // exception thrown
- env->ReleaseStringUTFChars(js, utf);
- *has_exception = JNI_TRUE;
- return nullptr;
- }
+// The portal class for org.rocksdb.DataBlockIndexType
+class DataBlockIndexTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.DataBlockIndexType for the provided
+ // C++ rocksdb::DataBlockIndexType enum
+ static jbyte toJavaDataBlockIndexType(
+ const rocksdb::BlockBasedTableOptions::DataBlockIndexType& index_type) {
+ switch(index_type) {
+ case rocksdb::BlockBasedTableOptions::DataBlockIndexType::kDataBlockBinarySearch:
+ return 0x0;
+ case rocksdb::BlockBasedTableOptions::DataBlockIndexType::kDataBlockBinaryAndHash:
+ return 0x1;
+ default:
+ return 0x7F; // undefined
+ }
+ }
- const jsize utf_len = env->GetStringUTFLength(js);
- std::unique_ptr<char[]> str(new char[utf_len + 1]); // Note: + 1 is needed for the c_str null terminator
- std::strcpy(str.get(), utf);
- env->ReleaseStringUTFChars(js, utf);
- *has_exception = JNI_FALSE;
- return str;
- }
+ // Returns the equivalent C++ rocksdb::DataBlockIndexType enum for the
+ // provided Java org.rocksdb.DataBlockIndexType
+ static rocksdb::BlockBasedTableOptions::DataBlockIndexType toCppDataBlockIndexType(
+ jbyte jindex_type) {
+ switch(jindex_type) {
+ case 0x0:
+ return rocksdb::BlockBasedTableOptions::DataBlockIndexType::kDataBlockBinarySearch;
+ case 0x1:
+ return rocksdb::BlockBasedTableOptions::DataBlockIndexType::kDataBlockBinaryAndHash;
+ default:
+ // undefined/default
+ return rocksdb::BlockBasedTableOptions::DataBlockIndexType::kDataBlockBinarySearch;
+ }
+ }
+};
- /**
- * Copies a jstring to a std::string
- * and releases the original jstring
- *
- * If an exception occurs, then JNIEnv::ExceptionCheck()
- * will have been called
- *
- * @param env (IN) A pointer to the java environment
- * @param js (IN) The java string to copy
- * @param has_exception (OUT) will be set to JNI_TRUE
- * if an OutOfMemoryError exception occurs
- *
- * @return A std::string copy of the jstring, or an
- * empty std::string if has_exception == JNI_TRUE
- */
- static std::string copyStdString(JNIEnv* env, jstring js,
- jboolean* has_exception) {
- const char *utf = env->GetStringUTFChars(js, nullptr);
- if(utf == nullptr) {
- // exception thrown: OutOfMemoryError
- env->ExceptionCheck();
- *has_exception = JNI_TRUE;
- return std::string();
- } else if(env->ExceptionCheck()) {
- // exception thrown
- env->ReleaseStringUTFChars(js, utf);
- *has_exception = JNI_TRUE;
- return std::string();
- }
+// The portal class for org.rocksdb.ChecksumType
+class ChecksumTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.ChecksumType for the provided
+ // C++ rocksdb::ChecksumType enum
+ static jbyte toJavaChecksumType(
+ const rocksdb::ChecksumType& checksum_type) {
+ switch(checksum_type) {
+ case rocksdb::ChecksumType::kNoChecksum:
+ return 0x0;
+ case rocksdb::ChecksumType::kCRC32c:
+ return 0x1;
+ case rocksdb::ChecksumType::kxxHash:
+ return 0x2;
+ case rocksdb::ChecksumType::kxxHash64:
+ return 0x3;
+ default:
+ return 0x7F; // undefined
+ }
+ }
- std::string name(utf);
- env->ReleaseStringUTFChars(js, utf);
- *has_exception = JNI_FALSE;
- return name;
- }
+ // Returns the equivalent C++ rocksdb::ChecksumType enum for the
+ // provided Java org.rocksdb.ChecksumType
+ static rocksdb::ChecksumType toCppChecksumType(
+ jbyte jchecksum_type) {
+ switch(jchecksum_type) {
+ case 0x0:
+ return rocksdb::ChecksumType::kNoChecksum;
+ case 0x1:
+ return rocksdb::ChecksumType::kCRC32c;
+ case 0x2:
+ return rocksdb::ChecksumType::kxxHash;
+ case 0x3:
+ return rocksdb::ChecksumType::kxxHash64;
+ default:
+ // undefined/default
+ return rocksdb::ChecksumType::kCRC32c;
+ }
+ }
+};
- /**
- * Copies bytes from a std::string to a jByteArray
- *
- * @param env A pointer to the java environment
- * @param bytes The bytes to copy
- *
- * @return the Java byte[] or nullptr if an exception occurs
- *
- * @throws RocksDBException thrown
- * if memory size to copy exceeds general java specific array size limitation.
- */
- static jbyteArray copyBytes(JNIEnv* env, std::string bytes) {
- return createJavaByteArrayWithSizeCheck(env, bytes.c_str(), bytes.size());
- }
+// The portal class for org.rocksdb.Priority
+class PriorityJni {
+ public:
+ // Returns the equivalent org.rocksdb.Priority for the provided
+ // C++ rocksdb::Env::Priority enum
+ static jbyte toJavaPriority(
+ const rocksdb::Env::Priority& priority) {
+ switch(priority) {
+ case rocksdb::Env::Priority::BOTTOM:
+ return 0x0;
+ case rocksdb::Env::Priority::LOW:
+ return 0x1;
+ case rocksdb::Env::Priority::HIGH:
+ return 0x2;
+ case rocksdb::Env::Priority::TOTAL:
+ return 0x3;
+ default:
+ return 0x7F; // undefined
+ }
+ }
- /**
- * Given a Java byte[][] which is an array of java.lang.Strings
- * where each String is a byte[], the passed function `string_fn`
- * will be called on each String, the result is the collected by
- * calling the passed function `collector_fn`
- *
- * @param env (IN) A pointer to the java environment
- * @param jbyte_strings (IN) A Java array of Strings expressed as bytes
- * @param string_fn (IN) A transform function to call for each String
- * @param collector_fn (IN) A collector which is called for the result
- * of each `string_fn`
- * @param has_exception (OUT) will be set to JNI_TRUE
- * if an ArrayIndexOutOfBoundsException or OutOfMemoryError
- * exception occurs
- */
- template <typename T> static void byteStrings(JNIEnv* env,
- jobjectArray jbyte_strings,
- std::function<T(const char*, const size_t)> string_fn,
- std::function<void(size_t, T)> collector_fn,
- jboolean *has_exception) {
- const jsize jlen = env->GetArrayLength(jbyte_strings);
+ // Returns the equivalent C++ rocksdb::Env::Priority enum for the
+ // provided Java org.rocksdb.Priority
+ static rocksdb::Env::Priority toCppPriority(
+ jbyte jpriority) {
+ switch(jpriority) {
+ case 0x0:
+ return rocksdb::Env::Priority::BOTTOM;
+ case 0x1:
+ return rocksdb::Env::Priority::LOW;
+ case 0x2:
+ return rocksdb::Env::Priority::HIGH;
+ case 0x3:
+ return rocksdb::Env::Priority::TOTAL;
+ default:
+ // undefined/default
+ return rocksdb::Env::Priority::LOW;
+ }
+ }
+};
- for(jsize i = 0; i < jlen; i++) {
- jobject jbyte_string_obj = env->GetObjectArrayElement(jbyte_strings, i);
- if(env->ExceptionCheck()) {
- // exception thrown: ArrayIndexOutOfBoundsException
- *has_exception = JNI_TRUE; // signal error
- return;
- }
+// The portal class for org.rocksdb.ThreadType
+class ThreadTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.ThreadType for the provided
+ // C++ rocksdb::ThreadStatus::ThreadType enum
+ static jbyte toJavaThreadType(
+ const rocksdb::ThreadStatus::ThreadType& thread_type) {
+ switch(thread_type) {
+ case rocksdb::ThreadStatus::ThreadType::HIGH_PRIORITY:
+ return 0x0;
+ case rocksdb::ThreadStatus::ThreadType::LOW_PRIORITY:
+ return 0x1;
+ case rocksdb::ThreadStatus::ThreadType::USER:
+ return 0x2;
+ case rocksdb::ThreadStatus::ThreadType::BOTTOM_PRIORITY:
+ return 0x3;
+ default:
+ return 0x7F; // undefined
+ }
+ }
- jbyteArray jbyte_string_ary =
- reinterpret_cast<jbyteArray>(jbyte_string_obj);
- T result = byteString(env, jbyte_string_ary, string_fn, has_exception);
+ // Returns the equivalent C++ rocksdb::ThreadStatus::ThreadType enum for the
+ // provided Java org.rocksdb.ThreadType
+ static rocksdb::ThreadStatus::ThreadType toCppThreadType(
+ jbyte jthread_type) {
+ switch(jthread_type) {
+ case 0x0:
+ return rocksdb::ThreadStatus::ThreadType::HIGH_PRIORITY;
+ case 0x1:
+ return rocksdb::ThreadStatus::ThreadType::LOW_PRIORITY;
+ case 0x2:
+ return rocksdb::ThreadStatus::ThreadType::USER;
+ case 0x3:
+ return rocksdb::ThreadStatus::ThreadType::BOTTOM_PRIORITY;
+ default:
+ // undefined/default
+ return rocksdb::ThreadStatus::ThreadType::LOW_PRIORITY;
+ }
+ }
+};
- env->DeleteLocalRef(jbyte_string_obj);
+// The portal class for org.rocksdb.OperationType
+class OperationTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.OperationType for the provided
+ // C++ rocksdb::ThreadStatus::OperationType enum
+ static jbyte toJavaOperationType(
+ const rocksdb::ThreadStatus::OperationType& operation_type) {
+ switch(operation_type) {
+ case rocksdb::ThreadStatus::OperationType::OP_UNKNOWN:
+ return 0x0;
+ case rocksdb::ThreadStatus::OperationType::OP_COMPACTION:
+ return 0x1;
+ case rocksdb::ThreadStatus::OperationType::OP_FLUSH:
+ return 0x2;
+ default:
+ return 0x7F; // undefined
+ }
+ }
- if(*has_exception == JNI_TRUE) {
- // exception thrown: OutOfMemoryError
- return;
- }
+ // Returns the equivalent C++ rocksdb::ThreadStatus::OperationType enum for the
+ // provided Java org.rocksdb.OperationType
+ static rocksdb::ThreadStatus::OperationType toCppOperationType(
+ jbyte joperation_type) {
+ switch(joperation_type) {
+ case 0x0:
+ return rocksdb::ThreadStatus::OperationType::OP_UNKNOWN;
+ case 0x1:
+ return rocksdb::ThreadStatus::OperationType::OP_COMPACTION;
+ case 0x2:
+ return rocksdb::ThreadStatus::OperationType::OP_FLUSH;
+ default:
+ // undefined/default
+ return rocksdb::ThreadStatus::OperationType::OP_UNKNOWN;
+ }
+ }
+};
- collector_fn(i, result);
- }
+// The portal class for org.rocksdb.OperationStage
+class OperationStageJni {
+ public:
+ // Returns the equivalent org.rocksdb.OperationStage for the provided
+ // C++ rocksdb::ThreadStatus::OperationStage enum
+ static jbyte toJavaOperationStage(
+ const rocksdb::ThreadStatus::OperationStage& operation_stage) {
+ switch(operation_stage) {
+ case rocksdb::ThreadStatus::OperationStage::STAGE_UNKNOWN:
+ return 0x0;
+ case rocksdb::ThreadStatus::OperationStage::STAGE_FLUSH_RUN:
+ return 0x1;
+ case rocksdb::ThreadStatus::OperationStage::STAGE_FLUSH_WRITE_L0:
+ return 0x2;
+ case rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_PREPARE:
+ return 0x3;
+ case rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_RUN:
+ return 0x4;
+ case rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_PROCESS_KV:
+ return 0x5;
+ case rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_INSTALL:
+ return 0x6;
+ case rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_SYNC_FILE:
+ return 0x7;
+ case rocksdb::ThreadStatus::OperationStage::STAGE_PICK_MEMTABLES_TO_FLUSH:
+ return 0x8;
+ case rocksdb::ThreadStatus::OperationStage::STAGE_MEMTABLE_ROLLBACK:
+ return 0x9;
+ case rocksdb::ThreadStatus::OperationStage::STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS:
+ return 0xA;
+ default:
+ return 0x7F; // undefined
+ }
+ }
- *has_exception = JNI_FALSE;
- }
+ // Returns the equivalent C++ rocksdb::ThreadStatus::OperationStage enum for the
+ // provided Java org.rocksdb.OperationStage
+ static rocksdb::ThreadStatus::OperationStage toCppOperationStage(
+ jbyte joperation_stage) {
+ switch(joperation_stage) {
+ case 0x0:
+ return rocksdb::ThreadStatus::OperationStage::STAGE_UNKNOWN;
+ case 0x1:
+ return rocksdb::ThreadStatus::OperationStage::STAGE_FLUSH_RUN;
+ case 0x2:
+ return rocksdb::ThreadStatus::OperationStage::STAGE_FLUSH_WRITE_L0;
+ case 0x3:
+ return rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_PREPARE;
+ case 0x4:
+ return rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_RUN;
+ case 0x5:
+ return rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_PROCESS_KV;
+ case 0x6:
+ return rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_INSTALL;
+ case 0x7:
+ return rocksdb::ThreadStatus::OperationStage::STAGE_COMPACTION_SYNC_FILE;
+ case 0x8:
+ return rocksdb::ThreadStatus::OperationStage::STAGE_PICK_MEMTABLES_TO_FLUSH;
+ case 0x9:
+ return rocksdb::ThreadStatus::OperationStage::STAGE_MEMTABLE_ROLLBACK;
+ case 0xA:
+ return rocksdb::ThreadStatus::OperationStage::STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS;
+ default:
+ // undefined/default
+ return rocksdb::ThreadStatus::OperationStage::STAGE_UNKNOWN;
+ }
+ }
+};
- /**
- * Given a Java String which is expressed as a Java Byte Array byte[],
- * the passed function `string_fn` will be called on the String
- * and the result returned
- *
- * @param env (IN) A pointer to the java environment
- * @param jbyte_string_ary (IN) A Java String expressed in bytes
- * @param string_fn (IN) A transform function to call on the String
- * @param has_exception (OUT) will be set to JNI_TRUE
- * if an OutOfMemoryError exception occurs
- */
- template <typename T> static T byteString(JNIEnv* env,
- jbyteArray jbyte_string_ary,
- std::function<T(const char*, const size_t)> string_fn,
- jboolean* has_exception) {
- const jsize jbyte_string_len = env->GetArrayLength(jbyte_string_ary);
- return byteString<T>(env, jbyte_string_ary, jbyte_string_len, string_fn,
- has_exception);
- }
+// The portal class for org.rocksdb.StateType
+class StateTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.StateType for the provided
+ // C++ rocksdb::ThreadStatus::StateType enum
+ static jbyte toJavaStateType(
+ const rocksdb::ThreadStatus::StateType& state_type) {
+ switch(state_type) {
+ case rocksdb::ThreadStatus::StateType::STATE_UNKNOWN:
+ return 0x0;
+ case rocksdb::ThreadStatus::StateType::STATE_MUTEX_WAIT:
+ return 0x1;
+ default:
+ return 0x7F; // undefined
+ }
+ }
- /**
- * Given a Java String which is expressed as a Java Byte Array byte[],
- * the passed function `string_fn` will be called on the String
- * and the result returned
- *
- * @param env (IN) A pointer to the java environment
- * @param jbyte_string_ary (IN) A Java String expressed in bytes
- * @param jbyte_string_len (IN) The length of the Java String
- * expressed in bytes
- * @param string_fn (IN) A transform function to call on the String
- * @param has_exception (OUT) will be set to JNI_TRUE
- * if an OutOfMemoryError exception occurs
- */
- template <typename T> static T byteString(JNIEnv* env,
- jbyteArray jbyte_string_ary, const jsize jbyte_string_len,
- std::function<T(const char*, const size_t)> string_fn,
- jboolean* has_exception) {
- jbyte* jbyte_string =
- env->GetByteArrayElements(jbyte_string_ary, nullptr);
- if(jbyte_string == nullptr) {
- // exception thrown: OutOfMemoryError
- *has_exception = JNI_TRUE;
- return nullptr; // signal error
- }
+ // Returns the equivalent C++ rocksdb::ThreadStatus::StateType enum for the
+ // provided Java org.rocksdb.StateType
+ static rocksdb::ThreadStatus::StateType toCppStateType(
+ jbyte jstate_type) {
+ switch(jstate_type) {
+ case 0x0:
+ return rocksdb::ThreadStatus::StateType::STATE_UNKNOWN;
+ case 0x1:
+ return rocksdb::ThreadStatus::StateType::STATE_MUTEX_WAIT;
+ default:
+ // undefined/default
+ return rocksdb::ThreadStatus::StateType::STATE_UNKNOWN;
+ }
+ }
+};
- T result =
- string_fn(reinterpret_cast<char *>(jbyte_string), jbyte_string_len);
+// The portal class for org.rocksdb.ThreadStatus
+class ThreadStatusJni : public JavaClass {
+ public:
+ /**
+ * Get the Java Class org.rocksdb.ThreadStatus
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env,
+ "org/rocksdb/ThreadStatus");
+ }
- env->ReleaseByteArrayElements(jbyte_string_ary, jbyte_string, JNI_ABORT);
+ /**
+ * Create a new Java org.rocksdb.ThreadStatus object with the same
+ * properties as the provided C++ rocksdb::ThreadStatus object
+ *
+ * @param env A pointer to the Java environment
+ * @param thread_status A pointer to rocksdb::ThreadStatus object
+ *
+   * @return A reference to a Java org.rocksdb.ThreadStatus object, or
+   * nullptr if an exception occurs
+ */
+ static jobject construct(JNIEnv* env,
+ const rocksdb::ThreadStatus* thread_status) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
- *has_exception = JNI_FALSE;
- return result;
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "(JBLjava/lang/String;Ljava/lang/String;BJB[JB)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
}
- /**
- * Converts a std::vector<string> to a Java byte[][] where each Java String
- * is expressed as a Java Byte Array byte[].
- *
- * @param env A pointer to the java environment
- * @param strings A vector of Strings
- *
- * @return A Java array of Strings expressed as bytes
- */
- static jobjectArray stringsBytes(JNIEnv* env, std::vector<std::string> strings) {
- jclass jcls_ba = ByteJni::getArrayJClass(env);
- if(jcls_ba == nullptr) {
- // exception occurred
+ jstring jdb_name =
+ JniUtil::toJavaString(env, &(thread_status->db_name), true);
+ if (env->ExceptionCheck()) {
+ // an error occurred
return nullptr;
- }
+ }
- const jsize len = static_cast<jsize>(strings.size());
+ jstring jcf_name =
+ JniUtil::toJavaString(env, &(thread_status->cf_name), true);
+ if (env->ExceptionCheck()) {
+ // an error occurred
+ env->DeleteLocalRef(jdb_name);
+ return nullptr;
+ }
- jobjectArray jbyte_strings = env->NewObjectArray(len, jcls_ba, nullptr);
- if(jbyte_strings == nullptr) {
+ // long[]
+ const jsize len = static_cast<jsize>(rocksdb::ThreadStatus::kNumOperationProperties);
+ jlongArray joperation_properties =
+ env->NewLongArray(len);
+ if (joperation_properties == nullptr) {
+ // an exception occurred
+ env->DeleteLocalRef(jdb_name);
+ env->DeleteLocalRef(jcf_name);
+ return nullptr;
+ }
+ jlong *body = env->GetLongArrayElements(joperation_properties, nullptr);
+ if (body == nullptr) {
// exception thrown: OutOfMemoryError
+ env->DeleteLocalRef(jdb_name);
+ env->DeleteLocalRef(jcf_name);
+ env->DeleteLocalRef(joperation_properties);
return nullptr;
- }
+ }
+      for (jsize i = 0; i < len; ++i) {
+ body[i] = static_cast<jlong>(thread_status->op_properties[i]);
+ }
+ env->ReleaseLongArrayElements(joperation_properties, body, 0);
+
+ jobject jcfd = env->NewObject(jclazz, mid,
+ static_cast<jlong>(thread_status->thread_id),
+ ThreadTypeJni::toJavaThreadType(thread_status->thread_type),
+ jdb_name,
+ jcf_name,
+ OperationTypeJni::toJavaOperationType(thread_status->operation_type),
+ static_cast<jlong>(thread_status->op_elapsed_micros),
+ OperationStageJni::toJavaOperationStage(thread_status->operation_stage),
+ joperation_properties,
+ StateTypeJni::toJavaStateType(thread_status->state_type));
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ env->DeleteLocalRef(jdb_name);
+ env->DeleteLocalRef(jcf_name);
+ env->DeleteLocalRef(joperation_properties);
+ return nullptr;
+ }
- for (jsize i = 0; i < len; i++) {
- std::string *str = &strings[i];
- const jsize str_len = static_cast<jsize>(str->size());
+ // cleanup
+ env->DeleteLocalRef(jdb_name);
+ env->DeleteLocalRef(jcf_name);
+ env->DeleteLocalRef(joperation_properties);
- jbyteArray jbyte_string_ary = env->NewByteArray(str_len);
- if(jbyte_string_ary == nullptr) {
- // exception thrown: OutOfMemoryError
- env->DeleteLocalRef(jbyte_strings);
- return nullptr;
- }
+ return jcfd;
+ }
+};
+
+// The portal class for org.rocksdb.CompactionStyle
+class CompactionStyleJni {
+ public:
+ // Returns the equivalent org.rocksdb.CompactionStyle for the provided
+ // C++ rocksdb::CompactionStyle enum
+ static jbyte toJavaCompactionStyle(
+ const rocksdb::CompactionStyle& compaction_style) {
+ switch(compaction_style) {
+ case rocksdb::CompactionStyle::kCompactionStyleLevel:
+ return 0x0;
+ case rocksdb::CompactionStyle::kCompactionStyleUniversal:
+ return 0x1;
+ case rocksdb::CompactionStyle::kCompactionStyleFIFO:
+ return 0x2;
+ case rocksdb::CompactionStyle::kCompactionStyleNone:
+ return 0x3;
+ default:
+ return 0x7F; // undefined
+ }
+ }
+
+ // Returns the equivalent C++ rocksdb::CompactionStyle enum for the
+ // provided Java org.rocksdb.CompactionStyle
+ static rocksdb::CompactionStyle toCppCompactionStyle(
+ jbyte jcompaction_style) {
+ switch(jcompaction_style) {
+ case 0x0:
+ return rocksdb::CompactionStyle::kCompactionStyleLevel;
+ case 0x1:
+ return rocksdb::CompactionStyle::kCompactionStyleUniversal;
+ case 0x2:
+ return rocksdb::CompactionStyle::kCompactionStyleFIFO;
+ case 0x3:
+ return rocksdb::CompactionStyle::kCompactionStyleNone;
+ default:
+ // undefined/default
+ return rocksdb::CompactionStyle::kCompactionStyleLevel;
+ }
+ }
+};
+
+// The portal class for org.rocksdb.CompactionReason
+class CompactionReasonJni {
+ public:
+ // Returns the equivalent org.rocksdb.CompactionReason for the provided
+ // C++ rocksdb::CompactionReason enum
+ static jbyte toJavaCompactionReason(
+ const rocksdb::CompactionReason& compaction_reason) {
+ switch(compaction_reason) {
+ case rocksdb::CompactionReason::kUnknown:
+ return 0x0;
+ case rocksdb::CompactionReason::kLevelL0FilesNum:
+ return 0x1;
+ case rocksdb::CompactionReason::kLevelMaxLevelSize:
+ return 0x2;
+ case rocksdb::CompactionReason::kUniversalSizeAmplification:
+ return 0x3;
+ case rocksdb::CompactionReason::kUniversalSizeRatio:
+ return 0x4;
+ case rocksdb::CompactionReason::kUniversalSortedRunNum:
+ return 0x5;
+ case rocksdb::CompactionReason::kFIFOMaxSize:
+ return 0x6;
+ case rocksdb::CompactionReason::kFIFOReduceNumFiles:
+ return 0x7;
+ case rocksdb::CompactionReason::kFIFOTtl:
+ return 0x8;
+ case rocksdb::CompactionReason::kManualCompaction:
+ return 0x9;
+ case rocksdb::CompactionReason::kFilesMarkedForCompaction:
+ return 0x10;
+ case rocksdb::CompactionReason::kBottommostFiles:
+ return 0x0A;
+ case rocksdb::CompactionReason::kTtl:
+ return 0x0B;
+ case rocksdb::CompactionReason::kFlush:
+ return 0x0C;
+ case rocksdb::CompactionReason::kExternalSstIngestion:
+ return 0x0D;
+ default:
+ return 0x7F; // undefined
+ }
+ }
- env->SetByteArrayRegion(
- jbyte_string_ary, 0, str_len,
- const_cast<jbyte*>(reinterpret_cast<const jbyte*>(str->c_str())));
- if(env->ExceptionCheck()) {
- // exception thrown: ArrayIndexOutOfBoundsException
- env->DeleteLocalRef(jbyte_string_ary);
- env->DeleteLocalRef(jbyte_strings);
- return nullptr;
- }
+ // Returns the equivalent C++ rocksdb::CompactionReason enum for the
+ // provided Java org.rocksdb.CompactionReason
+ static rocksdb::CompactionReason toCppCompactionReason(
+ jbyte jcompaction_reason) {
+ switch(jcompaction_reason) {
+ case 0x0:
+ return rocksdb::CompactionReason::kUnknown;
+ case 0x1:
+ return rocksdb::CompactionReason::kLevelL0FilesNum;
+ case 0x2:
+ return rocksdb::CompactionReason::kLevelMaxLevelSize;
+ case 0x3:
+ return rocksdb::CompactionReason::kUniversalSizeAmplification;
+ case 0x4:
+ return rocksdb::CompactionReason::kUniversalSizeRatio;
+ case 0x5:
+ return rocksdb::CompactionReason::kUniversalSortedRunNum;
+ case 0x6:
+ return rocksdb::CompactionReason::kFIFOMaxSize;
+ case 0x7:
+ return rocksdb::CompactionReason::kFIFOReduceNumFiles;
+ case 0x8:
+ return rocksdb::CompactionReason::kFIFOTtl;
+ case 0x9:
+ return rocksdb::CompactionReason::kManualCompaction;
+ case 0x10:
+ return rocksdb::CompactionReason::kFilesMarkedForCompaction;
+ case 0x0A:
+ return rocksdb::CompactionReason::kBottommostFiles;
+ case 0x0B:
+ return rocksdb::CompactionReason::kTtl;
+ case 0x0C:
+ return rocksdb::CompactionReason::kFlush;
+ case 0x0D:
+ return rocksdb::CompactionReason::kExternalSstIngestion;
+ default:
+ // undefined/default
+ return rocksdb::CompactionReason::kUnknown;
+ }
+ }
+};
- env->SetObjectArrayElement(jbyte_strings, i, jbyte_string_ary);
- if(env->ExceptionCheck()) {
- // exception thrown: ArrayIndexOutOfBoundsException
- // or ArrayStoreException
- env->DeleteLocalRef(jbyte_string_ary);
- env->DeleteLocalRef(jbyte_strings);
- return nullptr;
- }
+// The portal class for org.rocksdb.WalFileType
+class WalFileTypeJni {
+ public:
+ // Returns the equivalent org.rocksdb.WalFileType for the provided
+ // C++ rocksdb::WalFileType enum
+ static jbyte toJavaWalFileType(
+ const rocksdb::WalFileType& wal_file_type) {
+ switch(wal_file_type) {
+ case rocksdb::WalFileType::kArchivedLogFile:
+ return 0x0;
+ case rocksdb::WalFileType::kAliveLogFile:
+ return 0x1;
+ default:
+ return 0x7F; // undefined
+ }
+ }
- env->DeleteLocalRef(jbyte_string_ary);
- }
+ // Returns the equivalent C++ rocksdb::WalFileType enum for the
+ // provided Java org.rocksdb.WalFileType
+ static rocksdb::WalFileType toCppWalFileType(
+ jbyte jwal_file_type) {
+ switch(jwal_file_type) {
+ case 0x0:
+ return rocksdb::WalFileType::kArchivedLogFile;
+ case 0x1:
+ return rocksdb::WalFileType::kAliveLogFile;
+ default:
+ // undefined/default
+ return rocksdb::WalFileType::kAliveLogFile;
+ }
+ }
+};
- return jbyte_strings;
+class LogFileJni : public JavaClass {
+ public:
+ /**
+ * Create a new Java org.rocksdb.LogFile object.
+ *
+ * @param env A pointer to the Java environment
+ * @param log_file A Cpp log file object
+ *
+ * @return A reference to a Java org.rocksdb.LogFile object, or
+   * nullptr if an exception occurs
+ */
+ static jobject fromCppLogFile(JNIEnv* env, rocksdb::LogFile* log_file) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
}
- /**
- * Copies bytes to a new jByteArray with the check of java array size limitation.
- *
- * @param bytes pointer to memory to copy to a new jByteArray
- * @param size number of bytes to copy
- *
- * @return the Java byte[] or nullptr if an exception occurs
- *
- * @throws RocksDBException thrown
- * if memory size to copy exceeds general java array size limitation to avoid overflow.
- */
- static jbyteArray createJavaByteArrayWithSizeCheck(JNIEnv* env, const char* bytes, const size_t size) {
- // Limitation for java array size is vm specific
- // In general it cannot exceed Integer.MAX_VALUE (2^31 - 1)
- // Current HotSpot VM limitation for array size is Integer.MAX_VALUE - 5 (2^31 - 1 - 5)
- // It means that the next call to env->NewByteArray can still end with
- // OutOfMemoryError("Requested array size exceeds VM limit") coming from VM
- static const size_t MAX_JARRAY_SIZE = (static_cast<size_t>(1)) << 31;
- if(size > MAX_JARRAY_SIZE) {
- rocksdb::RocksDBExceptionJni::ThrowNew(env, "Requested array size exceeds VM limit");
- return nullptr;
- }
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "(Ljava/lang/String;JBJJ)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
- const jsize jlen = static_cast<jsize>(size);
- jbyteArray jbytes = env->NewByteArray(jlen);
- if(jbytes == nullptr) {
- // exception thrown: OutOfMemoryError
- return nullptr;
- }
+ std::string path_name = log_file->PathName();
+ jstring jpath_name = rocksdb::JniUtil::toJavaString(env, &path_name, true);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java string
+ return nullptr;
+ }
- env->SetByteArrayRegion(jbytes, 0, jlen,
- const_cast<jbyte*>(reinterpret_cast<const jbyte*>(bytes)));
- if(env->ExceptionCheck()) {
- // exception thrown: ArrayIndexOutOfBoundsException
- env->DeleteLocalRef(jbytes);
- return nullptr;
- }
+ jobject jlog_file = env->NewObject(jclazz, mid,
+ jpath_name,
+ static_cast<jlong>(log_file->LogNumber()),
+ rocksdb::WalFileTypeJni::toJavaWalFileType(log_file->Type()),
+ static_cast<jlong>(log_file->StartSequence()),
+ static_cast<jlong>(log_file->SizeFileBytes())
+ );
- return jbytes;
+ if (env->ExceptionCheck()) {
+ env->DeleteLocalRef(jpath_name);
+ return nullptr;
}
- /**
- * Copies bytes from a rocksdb::Slice to a jByteArray
- *
- * @param env A pointer to the java environment
- * @param bytes The bytes to copy
- *
- * @return the Java byte[] or nullptr if an exception occurs
- *
- * @throws RocksDBException thrown
- * if memory size to copy exceeds general java specific array size limitation.
- */
- static jbyteArray copyBytes(JNIEnv* env, const Slice& bytes) {
- return createJavaByteArrayWithSizeCheck(env, bytes.data(), bytes.size());
- }
+ // cleanup
+ env->DeleteLocalRef(jpath_name);
- /*
- * Helper for operations on a key and value
- * for example WriteBatch->Put
- *
- * TODO(AR) could be used for RocksDB->Put etc.
- */
- static std::unique_ptr<rocksdb::Status> kv_op(
- std::function<rocksdb::Status(rocksdb::Slice, rocksdb::Slice)> op,
- JNIEnv* env, jobject /*jobj*/,
- jbyteArray jkey, jint jkey_len,
- jbyteArray jvalue, jint jvalue_len) {
- jbyte* key = env->GetByteArrayElements(jkey, nullptr);
- if(env->ExceptionCheck()) {
- // exception thrown: OutOfMemoryError
- return nullptr;
- }
+ return jlog_file;
+ }
- jbyte* value = env->GetByteArrayElements(jvalue, nullptr);
- if(env->ExceptionCheck()) {
- // exception thrown: OutOfMemoryError
- if(key != nullptr) {
- env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
- }
- return nullptr;
- }
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/LogFile");
+ }
+};
- rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
- rocksdb::Slice value_slice(reinterpret_cast<char*>(value),
- jvalue_len);
+class LiveFileMetaDataJni : public JavaClass {
+ public:
+ /**
+ * Create a new Java org.rocksdb.LiveFileMetaData object.
+ *
+ * @param env A pointer to the Java environment
+ * @param live_file_meta_data A Cpp live file meta data object
+ *
+ * @return A reference to a Java org.rocksdb.LiveFileMetaData object, or
+   * nullptr if an exception occurs
+ */
+ static jobject fromCppLiveFileMetaData(JNIEnv* env,
+ rocksdb::LiveFileMetaData* live_file_meta_data) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
- auto status = op(key_slice, value_slice);
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "([BILjava/lang/String;Ljava/lang/String;JJJ[B[BJZJJ)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
- if(value != nullptr) {
- env->ReleaseByteArrayElements(jvalue, value, JNI_ABORT);
- }
- if(key != nullptr) {
- env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
- }
+ jbyteArray jcolumn_family_name = rocksdb::JniUtil::copyBytes(
+ env, live_file_meta_data->column_family_name);
+ if (jcolumn_family_name == nullptr) {
+ // exception occurred creating java byte array
+ return nullptr;
+ }
- return std::unique_ptr<rocksdb::Status>(new rocksdb::Status(status));
+ jstring jfile_name = rocksdb::JniUtil::toJavaString(
+ env, &live_file_meta_data->name, true);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java string
+ env->DeleteLocalRef(jcolumn_family_name);
+ return nullptr;
}
- /*
- * Helper for operations on a key
- * for example WriteBatch->Delete
- *
- * TODO(AR) could be used for RocksDB->Delete etc.
- */
- static std::unique_ptr<rocksdb::Status> k_op(
- std::function<rocksdb::Status(rocksdb::Slice)> op,
- JNIEnv* env, jobject /*jobj*/,
- jbyteArray jkey, jint jkey_len) {
- jbyte* key = env->GetByteArrayElements(jkey, nullptr);
- if(env->ExceptionCheck()) {
- // exception thrown: OutOfMemoryError
- return nullptr;
- }
+ jstring jpath = rocksdb::JniUtil::toJavaString(
+ env, &live_file_meta_data->db_path, true);
+ if (env->ExceptionCheck()) {
+ // exception occurred creating java string
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfile_name);
+ return nullptr;
+ }
- rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+ jbyteArray jsmallest_key = rocksdb::JniUtil::copyBytes(
+ env, live_file_meta_data->smallestkey);
+ if (jsmallest_key == nullptr) {
+ // exception occurred creating java byte array
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfile_name);
+ env->DeleteLocalRef(jpath);
+ return nullptr;
+ }
- auto status = op(key_slice);
+ jbyteArray jlargest_key = rocksdb::JniUtil::copyBytes(
+ env, live_file_meta_data->largestkey);
+ if (jlargest_key == nullptr) {
+ // exception occurred creating java byte array
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfile_name);
+ env->DeleteLocalRef(jpath);
+ env->DeleteLocalRef(jsmallest_key);
+ return nullptr;
+ }
- if(key != nullptr) {
- env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
- }
+ jobject jlive_file_meta_data = env->NewObject(jclazz, mid,
+ jcolumn_family_name,
+ static_cast<jint>(live_file_meta_data->level),
+ jfile_name,
+ jpath,
+ static_cast<jlong>(live_file_meta_data->size),
+ static_cast<jlong>(live_file_meta_data->smallest_seqno),
+ static_cast<jlong>(live_file_meta_data->largest_seqno),
+ jsmallest_key,
+ jlargest_key,
+ static_cast<jlong>(live_file_meta_data->num_reads_sampled),
+ static_cast<jboolean>(live_file_meta_data->being_compacted),
+ static_cast<jlong>(live_file_meta_data->num_entries),
+ static_cast<jlong>(live_file_meta_data->num_deletions)
+ );
- return std::unique_ptr<rocksdb::Status>(new rocksdb::Status(status));
+ if (env->ExceptionCheck()) {
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfile_name);
+ env->DeleteLocalRef(jpath);
+ env->DeleteLocalRef(jsmallest_key);
+ env->DeleteLocalRef(jlargest_key);
+ return nullptr;
}
- /*
- * Helper for operations on a value
- * for example WriteBatchWithIndex->GetFromBatch
- */
- static jbyteArray v_op(
- std::function<rocksdb::Status(rocksdb::Slice, std::string*)> op,
- JNIEnv* env, jbyteArray jkey, jint jkey_len) {
- jbyte* key = env->GetByteArrayElements(jkey, nullptr);
- if(env->ExceptionCheck()) {
- // exception thrown: OutOfMemoryError
- return nullptr;
- }
+ // cleanup
+ env->DeleteLocalRef(jcolumn_family_name);
+ env->DeleteLocalRef(jfile_name);
+ env->DeleteLocalRef(jpath);
+ env->DeleteLocalRef(jsmallest_key);
+ env->DeleteLocalRef(jlargest_key);
- rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+ return jlive_file_meta_data;
+ }
- std::string value;
- rocksdb::Status s = op(key_slice, &value);
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/LiveFileMetaData");
+ }
+};
- if(key != nullptr) {
- env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
- }
+class SstFileMetaDataJni : public JavaClass {
+ public:
+ /**
+ * Create a new Java org.rocksdb.SstFileMetaData object.
+ *
+ * @param env A pointer to the Java environment
+ * @param sst_file_meta_data A Cpp sst file meta data object
+ *
+ * @return A reference to a Java org.rocksdb.SstFileMetaData object, or
+   * nullptr if an exception occurs
+ */
+ static jobject fromCppSstFileMetaData(JNIEnv* env,
+ const rocksdb::SstFileMetaData* sst_file_meta_data) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
- if (s.IsNotFound()) {
- return nullptr;
- }
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "(Ljava/lang/String;Ljava/lang/String;JJJ[B[BJZJJ)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
- if (s.ok()) {
- jbyteArray jret_value =
- env->NewByteArray(static_cast<jsize>(value.size()));
- if(jret_value == nullptr) {
- // exception thrown: OutOfMemoryError
- return nullptr;
- }
+ jstring jfile_name = rocksdb::JniUtil::toJavaString(
+ env, &sst_file_meta_data->name, true);
+ if (jfile_name == nullptr) {
+      // exception occurred creating java string
+ return nullptr;
+ }
- env->SetByteArrayRegion(jret_value, 0, static_cast<jsize>(value.size()),
- const_cast<jbyte*>(reinterpret_cast<const jbyte*>(value.c_str())));
- if(env->ExceptionCheck()) {
- // exception thrown: ArrayIndexOutOfBoundsException
- if(jret_value != nullptr) {
- env->DeleteLocalRef(jret_value);
- }
- return nullptr;
- }
+ jstring jpath = rocksdb::JniUtil::toJavaString(
+ env, &sst_file_meta_data->db_path, true);
+ if (jpath == nullptr) {
+      // exception occurred creating java string
+ env->DeleteLocalRef(jfile_name);
+ return nullptr;
+ }
- return jret_value;
- }
+ jbyteArray jsmallest_key = rocksdb::JniUtil::copyBytes(
+ env, sst_file_meta_data->smallestkey);
+ if (jsmallest_key == nullptr) {
+ // exception occurred creating java byte array
+ env->DeleteLocalRef(jfile_name);
+ env->DeleteLocalRef(jpath);
+ return nullptr;
+ }
- rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ jbyteArray jlargest_key = rocksdb::JniUtil::copyBytes(
+ env, sst_file_meta_data->largestkey);
+ if (jlargest_key == nullptr) {
+ // exception occurred creating java byte array
+ env->DeleteLocalRef(jfile_name);
+ env->DeleteLocalRef(jpath);
+ env->DeleteLocalRef(jsmallest_key);
+ return nullptr;
+ }
+
+ jobject jsst_file_meta_data = env->NewObject(jclazz, mid,
+ jfile_name,
+ jpath,
+ static_cast<jlong>(sst_file_meta_data->size),
+        static_cast<jlong>(sst_file_meta_data->smallest_seqno),
+ static_cast<jlong>(sst_file_meta_data->largest_seqno),
+ jsmallest_key,
+ jlargest_key,
+ static_cast<jlong>(sst_file_meta_data->num_reads_sampled),
+ static_cast<jboolean>(sst_file_meta_data->being_compacted),
+ static_cast<jlong>(sst_file_meta_data->num_entries),
+ static_cast<jlong>(sst_file_meta_data->num_deletions)
+ );
+
+ if (env->ExceptionCheck()) {
+ env->DeleteLocalRef(jfile_name);
+ env->DeleteLocalRef(jpath);
+ env->DeleteLocalRef(jsmallest_key);
+ env->DeleteLocalRef(jlargest_key);
return nullptr;
}
+
+ // cleanup
+ env->DeleteLocalRef(jfile_name);
+ env->DeleteLocalRef(jpath);
+ env->DeleteLocalRef(jsmallest_key);
+ env->DeleteLocalRef(jlargest_key);
+
+ return jsst_file_meta_data;
+ }
+
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/SstFileMetaData");
+ }
};
-class ColumnFamilyDescriptorJni : public JavaClass {
+class LevelMetaDataJni : public JavaClass {
public:
/**
- * Get the Java Class org.rocksdb.ColumnFamilyDescriptor
+ * Create a new Java org.rocksdb.LevelMetaData object.
*
* @param env A pointer to the Java environment
+ * @param level_meta_data A Cpp level meta data object
*
- * @return The Java Class or nullptr if one of the
- * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
- * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ * @return A reference to a Java org.rocksdb.LevelMetaData object, or
+   * nullptr if an exception occurs
*/
+ static jobject fromCppLevelMetaData(JNIEnv* env,
+ const rocksdb::LevelMetaData* level_meta_data) {
+ jclass jclazz = getJClass(env);
+ if (jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
+ }
+
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "(IJ[Lorg/rocksdb/SstFileMetaData;)V");
+ if (mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return nullptr;
+ }
+
+ const jsize jlen =
+ static_cast<jsize>(level_meta_data->files.size());
+ jobjectArray jfiles = env->NewObjectArray(jlen, SstFileMetaDataJni::getJClass(env), nullptr);
+ if (jfiles == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ jsize i = 0;
+ for (auto it = level_meta_data->files.begin();
+ it != level_meta_data->files.end(); ++it) {
+ jobject jfile = SstFileMetaDataJni::fromCppSstFileMetaData(env, &(*it));
+ if (jfile == nullptr) {
+ // exception occurred
+ env->DeleteLocalRef(jfiles);
+ return nullptr;
+ }
+ env->SetObjectArrayElement(jfiles, i++, jfile);
+ }
+
+ jobject jlevel_meta_data = env->NewObject(jclazz, mid,
+ static_cast<jint>(level_meta_data->level),
+ static_cast<jlong>(level_meta_data->size),
+ jfiles
+ );
+
+ if (env->ExceptionCheck()) {
+ env->DeleteLocalRef(jfiles);
+ return nullptr;
+ }
+
+ // cleanup
+ env->DeleteLocalRef(jfiles);
+
+ return jlevel_meta_data;
+ }
+
static jclass getJClass(JNIEnv* env) {
- return JavaClass::getJClass(env, "org/rocksdb/ColumnFamilyDescriptor");
+ return JavaClass::getJClass(env, "org/rocksdb/LevelMetaData");
}
+};
+class ColumnFamilyMetaDataJni : public JavaClass {
+ public:
/**
- * Create a new Java org.rocksdb.ColumnFamilyDescriptor object with the same
- * properties as the provided C++ rocksdb::ColumnFamilyDescriptor object
+ * Create a new Java org.rocksdb.ColumnFamilyMetaData object.
*
* @param env A pointer to the Java environment
- * @param cfd A pointer to rocksdb::ColumnFamilyDescriptor object
+ * @param column_famly_meta_data A Cpp live file meta data object
*
- * @return A reference to a Java org.rocksdb.ColumnFamilyDescriptor object, or
+ * @return A reference to a Java org.rocksdb.ColumnFamilyMetaData object, or
* nullptr if an an exception occurs
*/
- static jobject construct(JNIEnv* env, ColumnFamilyDescriptor* cfd) {
- jbyteArray jcf_name = JniUtil::copyBytes(env, cfd->name);
- jobject cfopts = ColumnFamilyOptionsJni::construct(env, &(cfd->options));
-
+ static jobject fromCppColumnFamilyMetaData(JNIEnv* env,
+ const rocksdb::ColumnFamilyMetaData* column_famly_meta_data) {
jclass jclazz = getJClass(env);
if (jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- jmethodID mid = env->GetMethodID(jclazz, "<init>",
- "([BLorg/rocksdb/ColumnFamilyOptions;)V");
+ jmethodID mid = env->GetMethodID(jclazz, "<init>", "(JJ[B[Lorg/rocksdb/LevelMetaData;)V");
if (mid == nullptr) {
// exception thrown: NoSuchMethodException or OutOfMemoryError
- env->DeleteLocalRef(jcf_name);
return nullptr;
}
- jobject jcfd = env->NewObject(jclazz, mid, jcf_name, cfopts);
+ jbyteArray jname = rocksdb::JniUtil::copyBytes(
+ env, column_famly_meta_data->name);
+ if (jname == nullptr) {
+ // exception occurred creating java byte array
+ return nullptr;
+ }
+
+ const jsize jlen =
+ static_cast<jsize>(column_famly_meta_data->levels.size());
+ jobjectArray jlevels = env->NewObjectArray(jlen, LevelMetaDataJni::getJClass(env), nullptr);
+ if(jlevels == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->DeleteLocalRef(jname);
+ return nullptr;
+ }
+
+ jsize i = 0;
+ for (auto it = column_famly_meta_data->levels.begin();
+ it != column_famly_meta_data->levels.end(); ++it) {
+ jobject jlevel = LevelMetaDataJni::fromCppLevelMetaData(env, &(*it));
+ if (jlevel == nullptr) {
+ // exception occurred
+ env->DeleteLocalRef(jname);
+ env->DeleteLocalRef(jlevels);
+ return nullptr;
+ }
+ env->SetObjectArrayElement(jlevels, i++, jlevel);
+ }
+
+ jobject jcolumn_family_meta_data = env->NewObject(jclazz, mid,
+ static_cast<jlong>(column_famly_meta_data->size),
+ static_cast<jlong>(column_famly_meta_data->file_count),
+ jname,
+ jlevels
+ );
+
if (env->ExceptionCheck()) {
- env->DeleteLocalRef(jcf_name);
+ env->DeleteLocalRef(jname);
+ env->DeleteLocalRef(jlevels);
return nullptr;
}
- return jcfd;
+ // cleanup
+ env->DeleteLocalRef(jname);
+ env->DeleteLocalRef(jlevels);
+
+ return jcolumn_family_meta_data;
+ }
+
+ static jclass getJClass(JNIEnv* env) {
+ return JavaClass::getJClass(env, "org/rocksdb/ColumnFamilyMetaData");
}
+};
+// The portal class for org.rocksdb.AbstractTraceWriter
+class AbstractTraceWriterJni : public RocksDBNativeClass<
+ const rocksdb::TraceWriterJniCallback*,
+ AbstractTraceWriterJni> {
+ public:
/**
- * Get the Java Method: ColumnFamilyDescriptor#columnFamilyName
+ * Get the Java Class org.rocksdb.AbstractTraceWriter
+ *
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Class or nullptr if one of the
+ * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+ * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ */
+ static jclass getJClass(JNIEnv* env) {
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/AbstractTraceWriter");
+ }
+
+ /**
+ * Get the Java Method: AbstractTraceWriter#write
*
* @param env A pointer to the Java environment
*
* @return The Java Method ID or nullptr if the class or method id could not
* be retieved
*/
- static jmethodID getColumnFamilyNameMethod(JNIEnv* env) {
+ static jmethodID getWriteProxyMethodId(JNIEnv* env) {
jclass jclazz = getJClass(env);
- if (jclazz == nullptr) {
+ if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- static jmethodID mid = env->GetMethodID(jclazz, "columnFamilyName", "()[B");
+ static jmethodID mid = env->GetMethodID(
+ jclazz, "writeProxy", "(J)S");
assert(mid != nullptr);
return mid;
}
/**
- * Get the Java Method: ColumnFamilyDescriptor#columnFamilyOptions
+ * Get the Java Method: AbstractTraceWriter#closeWriter
*
* @param env A pointer to the Java environment
*
* @return The Java Method ID or nullptr if the class or method id could not
* be retieved
*/
- static jmethodID getColumnFamilyOptionsMethod(JNIEnv* env) {
+ static jmethodID getCloseWriterProxyMethodId(JNIEnv* env) {
jclass jclazz = getJClass(env);
- if (jclazz == nullptr) {
+ if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
static jmethodID mid = env->GetMethodID(
- jclazz, "columnFamilyOptions", "()Lorg/rocksdb/ColumnFamilyOptions;");
+ jclazz, "closeWriterProxy", "()S");
assert(mid != nullptr);
return mid;
}
-};
-
-class MapJni : public JavaClass {
- public:
- /**
- * Get the Java Class java.util.Map
- *
- * @param env A pointer to the Java environment
- *
- * @return The Java Class or nullptr if one of the
- * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
- * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
- */
- static jclass getClass(JNIEnv* env) {
- return JavaClass::getJClass(env, "java/util/Map");
- }
/**
- * Get the Java Method: Map#put
+ * Get the Java Method: AbstractTraceWriter#getFileSize
*
* @param env A pointer to the Java environment
*
* @return The Java Method ID or nullptr if the class or method id could not
* be retieved
*/
- static jmethodID getMapPutMethodId(JNIEnv* env) {
- jclass jlist_clazz = getClass(env);
- if(jlist_clazz == nullptr) {
+ static jmethodID getGetFileSizeMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- static jmethodID mid =
- env->GetMethodID(jlist_clazz, "put", "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;");
+ static jmethodID mid = env->GetMethodID(
+ jclazz, "getFileSize", "()J");
assert(mid != nullptr);
return mid;
}
};
-class HashMapJni : public JavaClass {
+// The portal class for org.rocksdb.AbstractWalFilter
+class AbstractWalFilterJni : public RocksDBNativeClass<
+ const rocksdb::WalFilterJniCallback*,
+ AbstractWalFilterJni> {
public:
/**
- * Get the Java Class java.util.HashMap
+ * Get the Java Class org.rocksdb.AbstractWalFilter
*
* @param env A pointer to the Java environment
*
* OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
*/
static jclass getJClass(JNIEnv* env) {
- return JavaClass::getJClass(env, "java/util/HashMap");
+ return RocksDBNativeClass::getJClass(env,
+ "org/rocksdb/AbstractWalFilter");
}
/**
- * Create a new Java java.util.HashMap object.
+ * Get the Java Method: AbstractWalFilter#columnFamilyLogNumberMap
*
* @param env A pointer to the Java environment
*
- * @return A reference to a Java java.util.HashMap object, or
- * nullptr if an an exception occurs
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
*/
- static jobject construct(JNIEnv* env, const uint32_t initial_capacity = 16) {
+ static jmethodID getColumnFamilyLogNumberMapMethodId(JNIEnv* env) {
jclass jclazz = getJClass(env);
- if (jclazz == nullptr) {
+ if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- jmethodID mid = env->GetMethodID(jclazz, "<init>", "(I)V");
- if (mid == nullptr) {
- // exception thrown: NoSuchMethodException or OutOfMemoryError
- return nullptr;
- }
-
- jobject jhash_map = env->NewObject(jclazz, mid, static_cast<jint>(initial_capacity));
- if (env->ExceptionCheck()) {
- return nullptr;
- }
-
- return jhash_map;
+ static jmethodID mid = env->GetMethodID(
+ jclazz, "columnFamilyLogNumberMap",
+ "(Ljava/util/Map;Ljava/util/Map;)V");
+ assert(mid != nullptr);
+ return mid;
}
/**
- * A function which maps a std::pair<K,V> to a std::pair<jobject, jobject>
+ * Get the Java Method: AbstractWalFilter#logRecordFoundProxy
*
- * @return Either a pointer to a std::pair<jobject, jobject>, or nullptr
- * if an error occurs during the mapping
- */
- template <typename K, typename V>
- using FnMapKV = std::function<std::unique_ptr<std::pair<jobject, jobject>> (const std::pair<K, V>&)>;
-
- // template <class I, typename K, typename V, typename K1, typename V1, typename std::enable_if<std::is_same<typename std::iterator_traits<I>::value_type, std::pair<const K,V>>::value, int32_t>::type = 0>
- // static void putAll(JNIEnv* env, const jobject jhash_map, I iterator, const FnMapKV<const K,V,K1,V1> &fn_map_kv) {
- /**
- * Returns true if it succeeds, false if an error occurs
+ * @param env A pointer to the Java environment
+ *
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
*/
- template<class iterator_type, typename K, typename V>
- static bool putAll(JNIEnv* env, const jobject jhash_map, iterator_type iterator, iterator_type end, const FnMapKV<K, V> &fn_map_kv) {
- const jmethodID jmid_put = rocksdb::MapJni::getMapPutMethodId(env);
- if (jmid_put == nullptr) {
- return false;
- }
-
- for (auto it = iterator; it != end; ++it) {
- const std::unique_ptr<std::pair<jobject, jobject>> result = fn_map_kv(*it);
- if (result == nullptr) {
- // an error occurred during fn_map_kv
- return false;
- }
- env->CallObjectMethod(jhash_map, jmid_put, result->first, result->second);
- if (env->ExceptionCheck()) {
- // exception occurred
- env->DeleteLocalRef(result->second);
- env->DeleteLocalRef(result->first);
- return false;
- }
-
- // release local references
- env->DeleteLocalRef(result->second);
- env->DeleteLocalRef(result->first);
+ static jmethodID getLogRecordFoundProxyMethodId(JNIEnv* env) {
+ jclass jclazz = getJClass(env);
+ if(jclazz == nullptr) {
+ // exception occurred accessing class
+ return nullptr;
}
- return true;
+ static jmethodID mid = env->GetMethodID(
+ jclazz, "logRecordFoundProxy", "(JLjava/lang/String;JJ)S");
+ assert(mid != nullptr);
+ return mid;
}
-};
-class LongJni : public JavaClass {
- public:
/**
- * Get the Java Class java.lang.Long
+ * Get the Java Method: AbstractWalFilter#name
*
* @param env A pointer to the Java environment
*
- * @return The Java Class or nullptr if one of the
- * ClassFormatError, ClassCircularityError, NoClassDefFoundError,
- * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+ * @return The Java Method ID or nullptr if the class or method id could not
+ * be retrieved
*/
- static jclass getJClass(JNIEnv* env) {
- return JavaClass::getJClass(env, "java/lang/Long");
- }
-
- static jobject valueOf(JNIEnv* env, jlong jprimitive_long) {
+ static jmethodID getNameMethodId(JNIEnv* env) {
jclass jclazz = getJClass(env);
- if (jclazz == nullptr) {
+ if(jclazz == nullptr) {
// exception occurred accessing class
return nullptr;
}
- jmethodID mid =
- env->GetStaticMethodID(jclazz, "valueOf", "(J)Ljava/lang/Long;");
- if (mid == nullptr) {
- // exception thrown: NoSuchMethodException or OutOfMemoryError
- return nullptr;
- }
+ static jmethodID mid = env->GetMethodID(
+ jclazz, "name", "()Ljava/lang/String;");
+ assert(mid != nullptr);
+ return mid;
+ }
+};
- const jobject jlong_obj =
- env->CallStaticObjectMethod(jclazz, mid, jprimitive_long);
- if (env->ExceptionCheck()) {
- // exception occurred
- return nullptr;
- }
+// The portal class for org.rocksdb.WalProcessingOption
+class WalProcessingOptionJni {
+ public:
+ // Returns the equivalent org.rocksdb.WalProcessingOption for the provided
+ // C++ rocksdb::WalFilter::WalProcessingOption enum
+ static jbyte toJavaWalProcessingOption(
+ const rocksdb::WalFilter::WalProcessingOption& wal_processing_option) {
+ switch(wal_processing_option) {
+ case rocksdb::WalFilter::WalProcessingOption::kContinueProcessing:
+ return 0x0;
+ case rocksdb::WalFilter::WalProcessingOption::kIgnoreCurrentRecord:
+ return 0x1;
+ case rocksdb::WalFilter::WalProcessingOption::kStopReplay:
+ return 0x2;
+ case rocksdb::WalFilter::WalProcessingOption::kCorruptedRecord:
+ return 0x3;
+ default:
+ return 0x7F; // undefined
+ }
+ }
- return jlong_obj;
- }
+ // Returns the equivalent C++ rocksdb::WalFilter::WalProcessingOption enum for
+ // the provided Java org.rocksdb.WalProcessingOption
+ static rocksdb::WalFilter::WalProcessingOption toCppWalProcessingOption(
+ jbyte jwal_processing_option) {
+ switch(jwal_processing_option) {
+ case 0x0:
+ return rocksdb::WalFilter::WalProcessingOption::kContinueProcessing;
+ case 0x1:
+ return rocksdb::WalFilter::WalProcessingOption::kIgnoreCurrentRecord;
+ case 0x2:
+ return rocksdb::WalFilter::WalProcessingOption::kStopReplay;
+ case 0x3:
+ return rocksdb::WalFilter::WalProcessingOption::kCorruptedRecord;
+ default:
+ // undefined/default
+ return rocksdb::WalFilter::WalProcessingOption::kCorruptedRecord;
+ }
+ }
};
} // namespace rocksdb
#endif // JAVA_ROCKSJNI_PORTAL_H_
#undef min
#endif
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Open
jlong rocksdb_open_helper(
JNIEnv* env, jlong jopt_handle, jstring jdb_path,
std::function<rocksdb::Status(const rocksdb::Options&, const std::string&,
* Method: open
* Signature: (JLjava/lang/String;)J
*/
-jlong Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2(JNIEnv* env,
- jclass /*jcls*/,
- jlong jopt_handle,
- jstring jdb_path) {
+jlong Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2(
+ JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path) {
return rocksdb_open_helper(
env, jopt_handle, jdb_path,
(rocksdb::Status(*)(const rocksdb::Options&, const std::string&,
* Signature: (JLjava/lang/String;)J
*/
jlong Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2(
- JNIEnv* env, jclass /*jcls*/, jlong jopt_handle, jstring jdb_path) {
+ JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path) {
return rocksdb_open_helper(env, jopt_handle, jdb_path,
[](const rocksdb::Options& options,
const std::string& db_path, rocksdb::DB** db) {
}
auto* opt = reinterpret_cast<rocksdb::DBOptions*>(jopt_handle);
- std::vector<rocksdb::ColumnFamilyHandle*> handles;
+ std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
rocksdb::DB* db = nullptr;
- rocksdb::Status s = open_fn(*opt, db_path, column_families, &handles, &db);
+ rocksdb::Status s = open_fn(*opt, db_path, column_families, &cf_handles, &db);
// we have now finished with db_path
env->ReleaseStringUTFChars(jdb_path, db_path);
// check if open operation was successful
- if (s.ok()) {
- const jsize resultsLen = 1 + len_cols; // db handle + column family handles
- std::unique_ptr<jlong[]> results =
- std::unique_ptr<jlong[]>(new jlong[resultsLen]);
- results[0] = reinterpret_cast<jlong>(db);
- for (int i = 1; i <= len_cols; i++) {
- results[i] = reinterpret_cast<jlong>(handles[i - 1]);
- }
+ if (!s.ok()) {
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
- jlongArray jresults = env->NewLongArray(resultsLen);
- if (jresults == nullptr) {
- // exception thrown: OutOfMemoryError
- return nullptr;
- }
+ const jsize resultsLen = 1 + len_cols; // db handle + column family handles
+ std::unique_ptr<jlong[]> results =
+ std::unique_ptr<jlong[]>(new jlong[resultsLen]);
+ results[0] = reinterpret_cast<jlong>(db);
+ for (int i = 1; i <= len_cols; i++) {
+ results[i] = reinterpret_cast<jlong>(cf_handles[i - 1]);
+ }
- env->SetLongArrayRegion(jresults, 0, resultsLen, results.get());
- if (env->ExceptionCheck()) {
- // exception thrown: ArrayIndexOutOfBoundsException
- env->DeleteLocalRef(jresults);
- return nullptr;
- }
+ jlongArray jresults = env->NewLongArray(resultsLen);
+ if (jresults == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
- return jresults;
- } else {
- rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ env->SetLongArrayRegion(jresults, 0, resultsLen, results.get());
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jresults);
return nullptr;
}
+
+ return jresults;
}
/*
* Signature: (JLjava/lang/String;[[B[J)[J
*/
jlongArray Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2_3_3B_3J(
- JNIEnv* env, jclass /*jcls*/, jlong jopt_handle, jstring jdb_path,
+ JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path,
jobjectArray jcolumn_names, jlongArray jcolumn_options) {
return rocksdb_open_helper(
env, jopt_handle, jdb_path, jcolumn_names, jcolumn_options,
* Signature: (JLjava/lang/String;[[B[J)[J
*/
jlongArray Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2_3_3B_3J(
- JNIEnv* env, jclass /*jcls*/, jlong jopt_handle, jstring jdb_path,
+ JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path,
jobjectArray jcolumn_names, jlongArray jcolumn_options) {
return rocksdb_open_helper(
env, jopt_handle, jdb_path, jcolumn_names, jcolumn_options,
rocksdb::DB::Open);
}
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::ListColumnFamilies
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jhandle);
+ assert(db != nullptr);
+ delete db;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: closeDatabase
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_closeDatabase(
+ JNIEnv* env, jclass, jlong jhandle) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jhandle);
+ assert(db != nullptr);
+ rocksdb::Status s = db->Close();
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+}
/*
* Class: org_rocksdb_RocksDB
* Method: listColumnFamilies
* Signature: (JLjava/lang/String;)[[B
*/
-jobjectArray Java_org_rocksdb_RocksDB_listColumnFamilies(JNIEnv* env,
- jclass /*jclazz*/,
- jlong jopt_handle,
- jstring jdb_path) {
+jobjectArray Java_org_rocksdb_RocksDB_listColumnFamilies(
+ JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path) {
std::vector<std::string> column_family_names;
const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
if (db_path == nullptr) {
return jcolumn_family_names;
}
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: createColumnFamily
+ * Signature: (J[BIJ)J
+ */
+jlong Java_org_rocksdb_RocksDB_createColumnFamily(
+ JNIEnv* env, jobject, jlong jhandle, jbyteArray jcf_name,
+ jint jcf_name_len, jlong jcf_options_handle) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jhandle);
+ jboolean has_exception = JNI_FALSE;
+ const std::string cf_name =
+ rocksdb::JniUtil::byteString<std::string>(env, jcf_name, jcf_name_len,
+ [](const char* str, const size_t len) {
+ return std::string(str, len);
+ }, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return 0;
+ }
+ auto* cf_options =
+ reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jcf_options_handle);
+ rocksdb::ColumnFamilyHandle *cf_handle;
+ rocksdb::Status s = db->CreateColumnFamily(*cf_options, cf_name, &cf_handle);
+ if (!s.ok()) {
+ // error occurred
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ return 0;
+ }
+ return reinterpret_cast<jlong>(cf_handle);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: createColumnFamilies
+ * Signature: (JJ[[B)[J
+ */
+jlongArray Java_org_rocksdb_RocksDB_createColumnFamilies__JJ_3_3B(
+ JNIEnv* env, jobject, jlong jhandle, jlong jcf_options_handle,
+ jobjectArray jcf_names) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jhandle);
+ auto* cf_options =
+ reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jcf_options_handle);
+ jboolean has_exception = JNI_FALSE;
+ std::vector<std::string> cf_names;
+ rocksdb::JniUtil::byteStrings<std::string>(env, jcf_names,
+ [](const char* str, const size_t len) {
+ return std::string(str, len);
+ },
+ [&cf_names](const size_t, std::string str) {
+ cf_names.push_back(str);
+ },
+ &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return nullptr;
+ }
+
+ std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
+ rocksdb::Status s = db->CreateColumnFamilies(*cf_options, cf_names, &cf_handles);
+ if (!s.ok()) {
+ // error occurred
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
+
+ jlongArray jcf_handles = rocksdb::JniUtil::toJPointers<rocksdb::ColumnFamilyHandle>(
+ env, cf_handles, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return nullptr;
+ }
+ return jcf_handles;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: createColumnFamilies
+ * Signature: (J[J[[B)[J
+ */
+jlongArray Java_org_rocksdb_RocksDB_createColumnFamilies__J_3J_3_3B(
+ JNIEnv* env, jobject, jlong jhandle, jlongArray jcf_options_handles,
+ jobjectArray jcf_names) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jhandle);
+ const jsize jlen = env->GetArrayLength(jcf_options_handles);
+ std::vector<rocksdb::ColumnFamilyDescriptor> cf_descriptors;
+ cf_descriptors.reserve(jlen);
+
+ jboolean jcf_options_handles_is_copy = JNI_FALSE;
+ jlong *jcf_options_handles_elems = env->GetLongArrayElements(jcf_options_handles, &jcf_options_handles_is_copy);
+ if(jcf_options_handles_elems == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ // extract the column family descriptors
+ jboolean has_exception = JNI_FALSE;
+ for (jsize i = 0; i < jlen; i++) {
+ auto* cf_options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
+ jcf_options_handles_elems[i]);
+ jbyteArray jcf_name = static_cast<jbyteArray>(
+ env->GetObjectArrayElement(jcf_names, i));
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->ReleaseLongArrayElements(jcf_options_handles, jcf_options_handles_elems, JNI_ABORT);
+ return nullptr;
+ }
+ const std::string cf_name =
+ rocksdb::JniUtil::byteString<std::string>(env, jcf_name,
+ [](const char* str, const size_t len) {
+ return std::string(str, len);
+ },
+ &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ env->DeleteLocalRef(jcf_name);
+ env->ReleaseLongArrayElements(jcf_options_handles, jcf_options_handles_elems, JNI_ABORT);
+ return nullptr;
+ }
+
+ cf_descriptors.push_back(rocksdb::ColumnFamilyDescriptor(cf_name, *cf_options));
+
+ env->DeleteLocalRef(jcf_name);
+ }
+
+ std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
+ rocksdb::Status s = db->CreateColumnFamilies(cf_descriptors, &cf_handles);
+
+ env->ReleaseLongArrayElements(jcf_options_handles, jcf_options_handles_elems, JNI_ABORT);
+
+ if (!s.ok()) {
+ // error occurred
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
+ }
+
+ jlongArray jcf_handles = rocksdb::JniUtil::toJPointers<rocksdb::ColumnFamilyHandle>(
+ env, cf_handles, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return nullptr;
+ }
+ return jcf_handles;
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: dropColumnFamily
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_RocksDB_dropColumnFamily(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jlong jcf_handle) {
+ auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+ rocksdb::Status s = db_handle->DropColumnFamily(cf_handle);
+ if (!s.ok()) {
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: dropColumnFamilies
+ * Signature: (J[J)V
+ */
+void Java_org_rocksdb_RocksDB_dropColumnFamilies(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jlongArray jcolumn_family_handles) {
+ auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+
+ std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
+ if (jcolumn_family_handles != nullptr) {
+ const jsize len_cols = env->GetArrayLength(jcolumn_family_handles);
+
+ jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr);
+ if (jcfh == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return;
+ }
+
+ for (jsize i = 0; i < len_cols; i++) {
+ auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcfh[i]);
+ cf_handles.push_back(cf_handle);
+ }
+ env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT);
+ }
+
+ rocksdb::Status s = db_handle->DropColumnFamilies(cf_handles);
+ if (!s.ok()) {
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ }
+}
+
//////////////////////////////////////////////////////////////////////////////
// rocksdb::DB::Put
/**
* @return true if the put succeeded, false if a Java Exception was thrown
*/
-bool rocksdb_put_helper(JNIEnv* env, rocksdb::DB* db,
- const rocksdb::WriteOptions& write_options,
- rocksdb::ColumnFamilyHandle* cf_handle, jbyteArray jkey,
- jint jkey_off, jint jkey_len, jbyteArray jval,
- jint jval_off, jint jval_len) {
+bool rocksdb_put_helper(
+ JNIEnv* env, rocksdb::DB* db,
+ const rocksdb::WriteOptions& write_options,
+ rocksdb::ColumnFamilyHandle* cf_handle, jbyteArray jkey,
+ jint jkey_off, jint jkey_len, jbyteArray jval,
+ jint jval_off, jint jval_len) {
jbyte* key = new jbyte[jkey_len];
env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
if (env->ExceptionCheck()) {
* Method: put
* Signature: (J[BII[BII)V
*/
-void Java_org_rocksdb_RocksDB_put__J_3BII_3BII(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jbyteArray jkey, jint jkey_off,
- jint jkey_len, jbyteArray jval,
- jint jval_off, jint jval_len) {
+void Java_org_rocksdb_RocksDB_put__J_3BII_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
static const rocksdb::WriteOptions default_write_options =
rocksdb::WriteOptions();
-
rocksdb_put_helper(env, db, default_write_options, nullptr, jkey, jkey_off,
- jkey_len, jval, jval_off, jval_len);
+ jkey_len, jval, jval_off, jval_len);
}
/*
* Method: put
* Signature: (J[BII[BIIJ)V
*/
-void Java_org_rocksdb_RocksDB_put__J_3BII_3BIIJ(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jbyteArray jkey, jint jkey_off,
- jint jkey_len, jbyteArray jval,
- jint jval_off, jint jval_len,
- jlong jcf_handle) {
+void Java_org_rocksdb_RocksDB_put__J_3BII_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len,
+ jlong jcf_handle) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
static const rocksdb::WriteOptions default_write_options =
rocksdb::WriteOptions();
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
if (cf_handle != nullptr) {
rocksdb_put_helper(env, db, default_write_options, cf_handle, jkey,
- jkey_off, jkey_len, jval, jval_off, jval_len);
+ jkey_off, jkey_len, jval, jval_off, jval_len);
} else {
rocksdb::RocksDBExceptionJni::ThrowNew(
env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
* Method: put
* Signature: (JJ[BII[BII)V
*/
-void Java_org_rocksdb_RocksDB_put__JJ_3BII_3BII(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jlong jwrite_options_handle,
- jbyteArray jkey, jint jkey_off,
- jint jkey_len, jbyteArray jval,
- jint jval_off, jint jval_len) {
+void Java_org_rocksdb_RocksDB_put__JJ_3BII_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jlong jwrite_options_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
auto* write_options =
reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
-
rocksdb_put_helper(env, db, *write_options, nullptr, jkey, jkey_off, jkey_len,
- jval, jval_off, jval_len);
+ jval, jval_off, jval_len);
}
/*
* Signature: (JJ[BII[BIIJ)V
*/
void Java_org_rocksdb_RocksDB_put__JJ_3BII_3BIIJ(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jwrite_options_handle,
- jbyteArray jkey, jint jkey_off, jint jkey_len, jbyteArray jval,
- jint jval_off, jint jval_len, jlong jcf_handle) {
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len,
+ jlong jcf_handle) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
auto* write_options =
reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
if (cf_handle != nullptr) {
rocksdb_put_helper(env, db, *write_options, cf_handle, jkey, jkey_off,
- jkey_len, jval, jval_off, jval_len);
+ jkey_len, jval, jval_off, jval_len);
} else {
rocksdb::RocksDBExceptionJni::ThrowNew(
env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
}
//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Write
-/*
- * Class: org_rocksdb_RocksDB
- * Method: write0
- * Signature: (JJJ)V
- */
-void Java_org_rocksdb_RocksDB_write0(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jlong jwrite_options_handle,
- jlong jwb_handle) {
- auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* write_options =
- reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
- auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
-
- rocksdb::Status s = db->Write(*write_options, wb);
-
- if (!s.ok()) {
- rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
- }
-}
+// rocksdb::DB::Delete()
-/*
- * Class: org_rocksdb_RocksDB
- * Method: write1
- * Signature: (JJJ)V
+/**
+ * @return true if the delete succeeded, false if a Java Exception was thrown
*/
-void Java_org_rocksdb_RocksDB_write1(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jlong jwrite_options_handle,
- jlong jwbwi_handle) {
- auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* write_options =
- reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
- auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
- auto* wb = wbwi->GetWriteBatch();
-
- rocksdb::Status s = db->Write(*write_options, wb);
-
- if (!s.ok()) {
- rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
- }
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::KeyMayExist
-jboolean key_may_exist_helper(JNIEnv* env, rocksdb::DB* db,
- const rocksdb::ReadOptions& read_opt,
- rocksdb::ColumnFamilyHandle* cf_handle,
- jbyteArray jkey, jint jkey_off, jint jkey_len,
- jobject jstring_builder, bool* has_exception) {
+bool rocksdb_delete_helper(
+ JNIEnv* env, rocksdb::DB* db, const rocksdb::WriteOptions& write_options,
+ rocksdb::ColumnFamilyHandle* cf_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len) {
jbyte* key = new jbyte[jkey_len];
env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
if (env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
delete[] key;
- *has_exception = true;
return false;
}
-
rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
- std::string value;
- bool value_found = false;
- bool keyMayExist;
+ rocksdb::Status s;
if (cf_handle != nullptr) {
- keyMayExist =
- db->KeyMayExist(read_opt, cf_handle, key_slice, &value, &value_found);
+ s = db->Delete(write_options, cf_handle, key_slice);
} else {
- keyMayExist = db->KeyMayExist(read_opt, key_slice, &value, &value_found);
+ // backwards compatibility
+ s = db->Delete(write_options, key_slice);
}
// cleanup
delete[] key;
- // extract the value
- if (value_found && !value.empty()) {
- jobject jresult_string_builder =
- rocksdb::StringBuilderJni::append(env, jstring_builder, value.c_str());
- if (jresult_string_builder == nullptr) {
- *has_exception = true;
- return false;
- }
+ if (s.ok()) {
+ return true;
}
- *has_exception = false;
- return static_cast<jboolean>(keyMayExist);
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ return false;
}
/*
* Class: org_rocksdb_RocksDB
- * Method: keyMayExist
- * Signature: (J[BIILjava/lang/StringBuilder;)Z
+ * Method: delete
+ * Signature: (J[BII)V
*/
-jboolean Java_org_rocksdb_RocksDB_keyMayExist__J_3BIILjava_lang_StringBuilder_2(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jbyteArray jkey,
- jint jkey_off, jint jkey_len, jobject jstring_builder) {
+void Java_org_rocksdb_RocksDB_delete__J_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- bool has_exception = false;
- return key_may_exist_helper(env, db, rocksdb::ReadOptions(), nullptr, jkey,
- jkey_off, jkey_len, jstring_builder,
- &has_exception);
+ static const rocksdb::WriteOptions default_write_options =
+ rocksdb::WriteOptions();
+ rocksdb_delete_helper(env, db, default_write_options, nullptr, jkey, jkey_off,
+ jkey_len);
}
/*
* Class: org_rocksdb_RocksDB
- * Method: keyMayExist
- * Signature: (J[BIIJLjava/lang/StringBuilder;)Z
+ * Method: delete
+ * Signature: (J[BIIJ)V
*/
-jboolean
-Java_org_rocksdb_RocksDB_keyMayExist__J_3BIIJLjava_lang_StringBuilder_2(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jbyteArray jkey,
- jint jkey_off, jint jkey_len, jlong jcf_handle, jobject jstring_builder) {
+void Java_org_rocksdb_RocksDB_delete__J_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jlong jcf_handle) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+ static const rocksdb::WriteOptions default_write_options =
+ rocksdb::WriteOptions();
+ auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
if (cf_handle != nullptr) {
- bool has_exception = false;
- return key_may_exist_helper(env, db, rocksdb::ReadOptions(), cf_handle,
- jkey, jkey_off, jkey_len, jstring_builder,
- &has_exception);
+ rocksdb_delete_helper(env, db, default_write_options, cf_handle, jkey,
+ jkey_off, jkey_len);
} else {
rocksdb::RocksDBExceptionJni::ThrowNew(
env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
- return true;
}
}
/*
* Class: org_rocksdb_RocksDB
- * Method: keyMayExist
- * Signature: (JJ[BIILjava/lang/StringBuilder;)Z
+ * Method: delete
+ * Signature: (JJ[BII)V
*/
-jboolean
-Java_org_rocksdb_RocksDB_keyMayExist__JJ_3BIILjava_lang_StringBuilder_2(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jread_options_handle,
- jbyteArray jkey, jint jkey_off, jint jkey_len, jobject jstring_builder) {
+void Java_org_rocksdb_RocksDB_delete__JJ_3BII(
+ JNIEnv* env, jobject,
+ jlong jdb_handle,
+ jlong jwrite_options,
+ jbyteArray jkey, jint jkey_off, jint jkey_len) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto& read_options =
- *reinterpret_cast<rocksdb::ReadOptions*>(jread_options_handle);
- bool has_exception = false;
- return key_may_exist_helper(env, db, read_options, nullptr, jkey, jkey_off,
- jkey_len, jstring_builder, &has_exception);
+ auto* write_options =
+ reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
+ rocksdb_delete_helper(env, db, *write_options, nullptr, jkey, jkey_off,
+ jkey_len);
}
/*
* Class: org_rocksdb_RocksDB
- * Method: keyMayExist
- * Signature: (JJ[BIIJLjava/lang/StringBuilder;)Z
+ * Method: delete
+ * Signature: (JJ[BIIJ)V
*/
-jboolean
-Java_org_rocksdb_RocksDB_keyMayExist__JJ_3BIIJLjava_lang_StringBuilder_2(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jread_options_handle,
- jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle,
- jobject jstring_builder) {
+void Java_org_rocksdb_RocksDB_delete__JJ_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options,
+ jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto& read_options =
- *reinterpret_cast<rocksdb::ReadOptions*>(jread_options_handle);
+ auto* write_options =
+ reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
if (cf_handle != nullptr) {
- bool has_exception = false;
- return key_may_exist_helper(env, db, read_options, cf_handle, jkey,
- jkey_off, jkey_len, jstring_builder,
- &has_exception);
+ rocksdb_delete_helper(env, db, *write_options, cf_handle, jkey, jkey_off,
+ jkey_len);
} else {
rocksdb::RocksDBExceptionJni::ThrowNew(
env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
- return true;
}
}
//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Get
-
-jbyteArray rocksdb_get_helper(JNIEnv* env, rocksdb::DB* db,
- const rocksdb::ReadOptions& read_opt,
- rocksdb::ColumnFamilyHandle* column_family_handle,
- jbyteArray jkey, jint jkey_off, jint jkey_len) {
- jbyte* key = new jbyte[jkey_len];
- env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
- if (env->ExceptionCheck()) {
- // exception thrown: ArrayIndexOutOfBoundsException
- delete[] key;
- return nullptr;
+// rocksdb::DB::SingleDelete()
+/**
+ * @return true if the single delete succeeded, false if a Java Exception
+ * was thrown
+ */
+bool rocksdb_single_delete_helper(
+ JNIEnv* env, rocksdb::DB* db,
+ const rocksdb::WriteOptions& write_options,
+ rocksdb::ColumnFamilyHandle* cf_handle,
+ jbyteArray jkey, jint jkey_len) {
+ jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+ if (key == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return false;
}
-
rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
- std::string value;
rocksdb::Status s;
- if (column_family_handle != nullptr) {
- s = db->Get(read_opt, column_family_handle, key_slice, &value);
+ if (cf_handle != nullptr) {
+ s = db->SingleDelete(write_options, cf_handle, key_slice);
} else {
// backwards compatibility
- s = db->Get(read_opt, key_slice, &value);
+ s = db->SingleDelete(write_options, key_slice);
}
- // cleanup
- delete[] key;
-
- if (s.IsNotFound()) {
- return nullptr;
- }
+ // trigger java unref on key.
+ // by passing JNI_ABORT, it will simply release the reference without
+ // copying the result back to the java byte array.
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
if (s.ok()) {
- jbyteArray jret_value = rocksdb::JniUtil::copyBytes(env, value);
- if (jret_value == nullptr) {
- // exception occurred
- return nullptr;
- }
- return jret_value;
+ return true;
}
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
- return nullptr;
+ return false;
}
/*
* Class: org_rocksdb_RocksDB
- * Method: get
- * Signature: (J[BII)[B
+ * Method: singleDelete
+ * Signature: (J[BI)V
*/
-jbyteArray Java_org_rocksdb_RocksDB_get__J_3BII(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jbyteArray jkey, jint jkey_off,
- jint jkey_len) {
- return rocksdb_get_helper(env, reinterpret_cast<rocksdb::DB*>(jdb_handle),
- rocksdb::ReadOptions(), nullptr, jkey, jkey_off,
- jkey_len);
+void Java_org_rocksdb_RocksDB_singleDelete__J_3BI(
+ JNIEnv* env, jobject,
+ jlong jdb_handle,
+ jbyteArray jkey,
+ jint jkey_len) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ static const rocksdb::WriteOptions default_write_options =
+ rocksdb::WriteOptions();
+ rocksdb_single_delete_helper(env, db, default_write_options, nullptr,
+ jkey, jkey_len);
}
/*
* Class: org_rocksdb_RocksDB
- * Method: get
- * Signature: (J[BIIJ)[B
+ * Method: singleDelete
+ * Signature: (J[BIJ)V
*/
-jbyteArray Java_org_rocksdb_RocksDB_get__J_3BIIJ(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jbyteArray jkey, jint jkey_off,
- jint jkey_len,
- jlong jcf_handle) {
- auto db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+void Java_org_rocksdb_RocksDB_singleDelete__J_3BIJ(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_len, jlong jcf_handle) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ static const rocksdb::WriteOptions default_write_options =
+ rocksdb::WriteOptions();
+ auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
if (cf_handle != nullptr) {
- return rocksdb_get_helper(env, db_handle, rocksdb::ReadOptions(), cf_handle,
- jkey, jkey_off, jkey_len);
+ rocksdb_single_delete_helper(env, db, default_write_options, cf_handle,
+ jkey, jkey_len);
} else {
rocksdb::RocksDBExceptionJni::ThrowNew(
env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
- return nullptr;
}
}
/*
* Class: org_rocksdb_RocksDB
- * Method: get
- * Signature: (JJ[BII)[B
+ * Method: singleDelete
+ * Signature: (JJ[BIJ)V
*/
-jbyteArray Java_org_rocksdb_RocksDB_get__JJ_3BII(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jlong jropt_handle,
- jbyteArray jkey, jint jkey_off,
- jint jkey_len) {
- return rocksdb_get_helper(
- env, reinterpret_cast<rocksdb::DB*>(jdb_handle),
- *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), nullptr, jkey,
- jkey_off, jkey_len);
+void Java_org_rocksdb_RocksDB_singleDelete__JJ_3BI(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jlong jwrite_options,
+ jbyteArray jkey,
+ jint jkey_len) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
+ rocksdb_single_delete_helper(env, db, *write_options, nullptr, jkey,
+ jkey_len);
}
/*
* Class: org_rocksdb_RocksDB
- * Method: get
- * Signature: (JJ[BIIJ)[B
+ * Method: singleDelete
+ * Signature: (JJ[BIJ)V
*/
-jbyteArray Java_org_rocksdb_RocksDB_get__JJ_3BIIJ(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jropt_handle,
- jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle) {
- auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto& ro_opt = *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle);
+void Java_org_rocksdb_RocksDB_singleDelete__JJ_3BIJ(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options,
+ jbyteArray jkey, jint jkey_len, jlong jcf_handle) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
if (cf_handle != nullptr) {
- return rocksdb_get_helper(env, db_handle, ro_opt, cf_handle, jkey, jkey_off,
- jkey_len);
+ rocksdb_single_delete_helper(env, db, *write_options, cf_handle, jkey,
+ jkey_len);
} else {
rocksdb::RocksDBExceptionJni::ThrowNew(
env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
- return nullptr;
}
}
-jint rocksdb_get_helper(JNIEnv* env, rocksdb::DB* db,
- const rocksdb::ReadOptions& read_options,
- rocksdb::ColumnFamilyHandle* column_family_handle,
- jbyteArray jkey, jint jkey_off, jint jkey_len,
- jbyteArray jval, jint jval_off, jint jval_len,
- bool* has_exception) {
- static const int kNotFound = -1;
- static const int kStatusError = -2;
-
- jbyte* key = new jbyte[jkey_len];
- env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
+//////////////////////////////////////////////////////////////////////////////
+// rocksdb::DB::DeleteRange()
+/**
+ * @return true if the delete range succeeded, false if a Java Exception
+ * was thrown
+ */
+bool rocksdb_delete_range_helper(
+ JNIEnv* env, rocksdb::DB* db,
+ const rocksdb::WriteOptions& write_options,
+ rocksdb::ColumnFamilyHandle* cf_handle,
+ jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
+ jbyteArray jend_key, jint jend_key_off, jint jend_key_len) {
+ jbyte* begin_key = new jbyte[jbegin_key_len];
+ env->GetByteArrayRegion(jbegin_key, jbegin_key_off, jbegin_key_len,
+ begin_key);
if (env->ExceptionCheck()) {
- // exception thrown: OutOfMemoryError
- delete[] key;
- *has_exception = true;
- return kStatusError;
+ // exception thrown: ArrayIndexOutOfBoundsException
+ delete[] begin_key;
+ return false;
}
- rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+ rocksdb::Slice begin_key_slice(reinterpret_cast<char*>(begin_key),
+ jbegin_key_len);
- // TODO(yhchiang): we might save one memory allocation here by adding
- // a DB::Get() function which takes preallocated jbyte* as input.
- std::string cvalue;
- rocksdb::Status s;
- if (column_family_handle != nullptr) {
- s = db->Get(read_options, column_family_handle, key_slice, &cvalue);
- } else {
- // backwards compatibility
- s = db->Get(read_options, key_slice, &cvalue);
+ jbyte* end_key = new jbyte[jend_key_len];
+ env->GetByteArrayRegion(jend_key, jend_key_off, jend_key_len, end_key);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ delete[] begin_key;
+ delete[] end_key;
+ return false;
}
+ rocksdb::Slice end_key_slice(reinterpret_cast<char*>(end_key), jend_key_len);
- // cleanup
- delete[] key;
+ rocksdb::Status s =
+ db->DeleteRange(write_options, cf_handle, begin_key_slice, end_key_slice);
- if (s.IsNotFound()) {
- *has_exception = false;
- return kNotFound;
- } else if (!s.ok()) {
- *has_exception = true;
- // Here since we are throwing a Java exception from c++ side.
- // As a result, c++ does not know calling this function will in fact
- // throwing an exception. As a result, the execution flow will
- // not stop here, and codes after this throw will still be
- // executed.
- rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ // cleanup
+ delete[] begin_key;
+ delete[] end_key;
- // Return a dummy const value to avoid compilation error, although
- // java side might not have a chance to get the return value :)
- return kStatusError;
+ if (s.ok()) {
+ return true;
}
- const jint cvalue_len = static_cast<jint>(cvalue.size());
- const jint length = std::min(jval_len, cvalue_len);
-
- env->SetByteArrayRegion(
- jval, jval_off, length,
- const_cast<jbyte*>(reinterpret_cast<const jbyte*>(cvalue.c_str())));
- if (env->ExceptionCheck()) {
- // exception thrown: OutOfMemoryError
- *has_exception = true;
- return kStatusError;
- }
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ return false;
+}
- *has_exception = false;
- return cvalue_len;
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: deleteRange
+ * Signature: (J[BII[BII)V
+ */
+void Java_org_rocksdb_RocksDB_deleteRange__J_3BII_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
+ jbyteArray jend_key, jint jend_key_off, jint jend_key_len) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ static const rocksdb::WriteOptions default_write_options =
+ rocksdb::WriteOptions();
+ rocksdb_delete_range_helper(env, db, default_write_options, nullptr,
+ jbegin_key, jbegin_key_off, jbegin_key_len,
+ jend_key, jend_key_off, jend_key_len);
}
-inline void multi_get_helper_release_keys(
- JNIEnv* env, std::vector<std::pair<jbyte*, jobject>>& keys_to_free) {
- auto end = keys_to_free.end();
- for (auto it = keys_to_free.begin(); it != end; ++it) {
- delete[] it->first;
- env->DeleteLocalRef(it->second);
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: deleteRange
+ * Signature: (J[BII[BIIJ)V
+ */
+void Java_org_rocksdb_RocksDB_deleteRange__J_3BII_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
+ jbyteArray jend_key, jint jend_key_off, jint jend_key_len,
+ jlong jcf_handle) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ static const rocksdb::WriteOptions default_write_options =
+ rocksdb::WriteOptions();
+ auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+ if (cf_handle != nullptr) {
+ rocksdb_delete_range_helper(env, db, default_write_options, cf_handle,
+ jbegin_key, jbegin_key_off, jbegin_key_len,
+ jend_key, jend_key_off, jend_key_len);
+ } else {
+ rocksdb::RocksDBExceptionJni::ThrowNew(
+ env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
}
- keys_to_free.clear();
}
-/**
- * cf multi get
- *
- * @return byte[][] of values or nullptr if an exception occurs
- */
-jobjectArray multi_get_helper(JNIEnv* env, jobject /*jdb*/, rocksdb::DB* db,
- const rocksdb::ReadOptions& rOpt,
- jobjectArray jkeys, jintArray jkey_offs,
- jintArray jkey_lens,
- jlongArray jcolumn_family_handles) {
- std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
- if (jcolumn_family_handles != nullptr) {
- const jsize len_cols = env->GetArrayLength(jcolumn_family_handles);
-
- jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr);
- if (jcfh == nullptr) {
- // exception thrown: OutOfMemoryError
- return nullptr;
- }
-
- for (jsize i = 0; i < len_cols; i++) {
- auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcfh[i]);
- cf_handles.push_back(cf_handle);
- }
- env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT);
- }
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: deleteRange
+ * Signature: (JJ[BII[BII)V
+ */
+void Java_org_rocksdb_RocksDB_deleteRange__JJ_3BII_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options,
+ jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
+ jbyteArray jend_key, jint jend_key_off, jint jend_key_len) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
+ rocksdb_delete_range_helper(env, db, *write_options, nullptr, jbegin_key,
+ jbegin_key_off, jbegin_key_len, jend_key,
+ jend_key_off, jend_key_len);
+}
- const jsize len_keys = env->GetArrayLength(jkeys);
- if (env->EnsureLocalCapacity(len_keys) != 0) {
- // exception thrown: OutOfMemoryError
- return nullptr;
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: deleteRange
+ * Signature: (JJ[BII[BIIJ)V
+ */
+void Java_org_rocksdb_RocksDB_deleteRange__JJ_3BII_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options,
+ jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
+ jbyteArray jend_key, jint jend_key_off, jint jend_key_len,
+ jlong jcf_handle) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
+ auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+ if (cf_handle != nullptr) {
+ rocksdb_delete_range_helper(env, db, *write_options, cf_handle,
+ jbegin_key, jbegin_key_off, jbegin_key_len,
+ jend_key, jend_key_off, jend_key_len);
+ } else {
+ rocksdb::RocksDBExceptionJni::ThrowNew(
+ env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
}
+}
- jint* jkey_off = env->GetIntArrayElements(jkey_offs, nullptr);
- if (jkey_off == nullptr) {
- // exception thrown: OutOfMemoryError
- return nullptr;
- }
+//////////////////////////////////////////////////////////////////////////////
+// rocksdb::DB::Merge
- jint* jkey_len = env->GetIntArrayElements(jkey_lens, nullptr);
- if (jkey_len == nullptr) {
- // exception thrown: OutOfMemoryError
- env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
- return nullptr;
+/**
+ * @return true if the merge succeeded, false if a Java Exception was thrown
+ */
+bool rocksdb_merge_helper(
+ JNIEnv* env, rocksdb::DB* db, const rocksdb::WriteOptions& write_options,
+ rocksdb::ColumnFamilyHandle* cf_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len) {
+ jbyte* key = new jbyte[jkey_len];
+ env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ delete[] key;
+ return false;
}
+ rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
- std::vector<rocksdb::Slice> keys;
- std::vector<std::pair<jbyte*, jobject>> keys_to_free;
- for (jsize i = 0; i < len_keys; i++) {
- jobject jkey = env->GetObjectArrayElement(jkeys, i);
- if (env->ExceptionCheck()) {
- // exception thrown: ArrayIndexOutOfBoundsException
- env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
- env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
- multi_get_helper_release_keys(env, keys_to_free);
- return nullptr;
- }
-
- jbyteArray jkey_ba = reinterpret_cast<jbyteArray>(jkey);
-
- const jint len_key = jkey_len[i];
- jbyte* key = new jbyte[len_key];
- env->GetByteArrayRegion(jkey_ba, jkey_off[i], len_key, key);
- if (env->ExceptionCheck()) {
- // exception thrown: ArrayIndexOutOfBoundsException
- delete[] key;
- env->DeleteLocalRef(jkey);
- env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
- env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
- multi_get_helper_release_keys(env, keys_to_free);
- return nullptr;
- }
-
- rocksdb::Slice key_slice(reinterpret_cast<char*>(key), len_key);
- keys.push_back(key_slice);
-
- keys_to_free.push_back(std::pair<jbyte*, jobject>(key, jkey));
+ jbyte* value = new jbyte[jval_len];
+ env->GetByteArrayRegion(jval, jval_off, jval_len, value);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ delete[] value;
+ delete[] key;
+ return false;
}
+ rocksdb::Slice value_slice(reinterpret_cast<char*>(value), jval_len);
- // cleanup jkey_off and jken_len
- env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
- env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
-
- std::vector<std::string> values;
- std::vector<rocksdb::Status> s;
- if (cf_handles.size() == 0) {
- s = db->MultiGet(rOpt, keys, &values);
+ rocksdb::Status s;
+ if (cf_handle != nullptr) {
+ s = db->Merge(write_options, cf_handle, key_slice, value_slice);
} else {
- s = db->MultiGet(rOpt, cf_handles, keys, &values);
- }
-
- // free up allocated byte arrays
- multi_get_helper_release_keys(env, keys_to_free);
-
- // prepare the results
- jobjectArray jresults =
- rocksdb::ByteJni::new2dByteArray(env, static_cast<jsize>(s.size()));
- if (jresults == nullptr) {
- // exception occurred
- return nullptr;
+ s = db->Merge(write_options, key_slice, value_slice);
}
- // TODO(AR) it is not clear to me why EnsureLocalCapacity is needed for the
- // loop as we cleanup references with env->DeleteLocalRef(jentry_value);
- if (env->EnsureLocalCapacity(static_cast<jint>(s.size())) != 0) {
- // exception thrown: OutOfMemoryError
- return nullptr;
- }
- // add to the jresults
- for (std::vector<rocksdb::Status>::size_type i = 0; i != s.size(); i++) {
- if (s[i].ok()) {
- std::string* value = &values[i];
- const jsize jvalue_len = static_cast<jsize>(value->size());
- jbyteArray jentry_value = env->NewByteArray(jvalue_len);
- if (jentry_value == nullptr) {
- // exception thrown: OutOfMemoryError
- return nullptr;
- }
-
- env->SetByteArrayRegion(
- jentry_value, 0, static_cast<jsize>(jvalue_len),
- const_cast<jbyte*>(reinterpret_cast<const jbyte*>(value->c_str())));
- if (env->ExceptionCheck()) {
- // exception thrown: ArrayIndexOutOfBoundsException
- env->DeleteLocalRef(jentry_value);
- return nullptr;
- }
-
- env->SetObjectArrayElement(jresults, static_cast<jsize>(i), jentry_value);
- if (env->ExceptionCheck()) {
- // exception thrown: ArrayIndexOutOfBoundsException
- env->DeleteLocalRef(jentry_value);
- return nullptr;
- }
+ // cleanup
+ delete[] value;
+ delete[] key;
- env->DeleteLocalRef(jentry_value);
- }
+ if (s.ok()) {
+ return true;
}
- return jresults;
-}
-
-/*
- * Class: org_rocksdb_RocksDB
- * Method: multiGet
- * Signature: (J[[B[I[I)[[B
- */
-jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B_3I_3I(
- JNIEnv* env, jobject jdb, jlong jdb_handle, jobjectArray jkeys,
- jintArray jkey_offs, jintArray jkey_lens) {
- return multi_get_helper(env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
- rocksdb::ReadOptions(), jkeys, jkey_offs, jkey_lens,
- nullptr);
-}
-
-/*
- * Class: org_rocksdb_RocksDB
- * Method: multiGet
- * Signature: (J[[B[I[I[J)[[B
- */
-jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B_3I_3I_3J(
- JNIEnv* env, jobject jdb, jlong jdb_handle, jobjectArray jkeys,
- jintArray jkey_offs, jintArray jkey_lens,
- jlongArray jcolumn_family_handles) {
- return multi_get_helper(env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
- rocksdb::ReadOptions(), jkeys, jkey_offs, jkey_lens,
- jcolumn_family_handles);
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ return false;
}
/*
* Class: org_rocksdb_RocksDB
- * Method: multiGet
- * Signature: (JJ[[B[I[I)[[B
+ * Method: merge
+ * Signature: (J[BII[BII)V
*/
-jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B_3I_3I(
- JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
- jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens) {
- return multi_get_helper(
- env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
- *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), jkeys, jkey_offs,
- jkey_lens, nullptr);
+void Java_org_rocksdb_RocksDB_merge__J_3BII_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ static const rocksdb::WriteOptions default_write_options =
+ rocksdb::WriteOptions();
+ rocksdb_merge_helper(env, db, default_write_options, nullptr, jkey, jkey_off,
+ jkey_len, jval, jval_off, jval_len);
}
/*
* Class: org_rocksdb_RocksDB
- * Method: multiGet
- * Signature: (JJ[[B[I[I[J)[[B
+ * Method: merge
+ * Signature: (J[BII[BIIJ)V
*/
-jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B_3I_3I_3J(
- JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
- jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens,
- jlongArray jcolumn_family_handles) {
- return multi_get_helper(
- env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
- *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), jkeys, jkey_offs,
- jkey_lens, jcolumn_family_handles);
+void Java_org_rocksdb_RocksDB_merge__J_3BII_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len,
+ jlong jcf_handle) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ static const rocksdb::WriteOptions default_write_options =
+ rocksdb::WriteOptions();
+ auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+ if (cf_handle != nullptr) {
+ rocksdb_merge_helper(env, db, default_write_options, cf_handle, jkey,
+ jkey_off, jkey_len, jval, jval_off, jval_len);
+ } else {
+ rocksdb::RocksDBExceptionJni::ThrowNew(
+ env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
+ }
}
/*
* Class: org_rocksdb_RocksDB
- * Method: get
- * Signature: (J[BII[BII)I
+ * Method: merge
+ * Signature: (JJ[BII[BII)V
*/
-jint Java_org_rocksdb_RocksDB_get__J_3BII_3BII(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jbyteArray jkey, jint jkey_off,
- jint jkey_len, jbyteArray jval,
- jint jval_off, jint jval_len) {
- bool has_exception = false;
- return rocksdb_get_helper(env, reinterpret_cast<rocksdb::DB*>(jdb_handle),
- rocksdb::ReadOptions(), nullptr, jkey, jkey_off,
- jkey_len, jval, jval_off, jval_len, &has_exception);
+void Java_org_rocksdb_RocksDB_merge__JJ_3BII_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
+ rocksdb_merge_helper(env, db, *write_options, nullptr, jkey, jkey_off,
+ jkey_len, jval, jval_off, jval_len);
}
/*
* Class: org_rocksdb_RocksDB
- * Method: get
- * Signature: (J[BII[BIIJ)I
+ * Method: merge
+ * Signature: (JJ[BII[BIIJ)V
*/
-jint Java_org_rocksdb_RocksDB_get__J_3BII_3BIIJ(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jbyteArray jkey, jint jkey_off,
- jint jkey_len, jbyteArray jval,
- jint jval_off, jint jval_len,
- jlong jcf_handle) {
- auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+void Java_org_rocksdb_RocksDB_merge__JJ_3BII_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len, jlong jcf_handle) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
if (cf_handle != nullptr) {
- bool has_exception = false;
- return rocksdb_get_helper(env, db_handle, rocksdb::ReadOptions(), cf_handle,
- jkey, jkey_off, jkey_len, jval, jval_off,
- jval_len, &has_exception);
+ rocksdb_merge_helper(env, db, *write_options, cf_handle, jkey, jkey_off,
+ jkey_len, jval, jval_off, jval_len);
} else {
rocksdb::RocksDBExceptionJni::ThrowNew(
env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
- // will never be evaluated
- return 0;
}
}
+jlong rocksdb_iterator_helper(rocksdb::DB* db,
+ rocksdb::ReadOptions read_options,
+ rocksdb::ColumnFamilyHandle* cf_handle) {
+ rocksdb::Iterator* iterator = nullptr;
+ if (cf_handle != nullptr) {
+ iterator = db->NewIterator(read_options, cf_handle);
+ } else {
+ iterator = db->NewIterator(read_options);
+ }
+ return reinterpret_cast<jlong>(iterator);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// rocksdb::DB::Write
/*
* Class: org_rocksdb_RocksDB
- * Method: get
- * Signature: (JJ[BII[BII)I
+ * Method: write0
+ * Signature: (JJJ)V
*/
-jint Java_org_rocksdb_RocksDB_get__JJ_3BII_3BII(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jlong jropt_handle,
- jbyteArray jkey, jint jkey_off,
- jint jkey_len, jbyteArray jval,
- jint jval_off, jint jval_len) {
- bool has_exception = false;
- return rocksdb_get_helper(
- env, reinterpret_cast<rocksdb::DB*>(jdb_handle),
- *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), nullptr, jkey,
- jkey_off, jkey_len, jval, jval_off, jval_len, &has_exception);
+void Java_org_rocksdb_RocksDB_write0(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jlong jwrite_options_handle, jlong jwb_handle) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
+ auto* wb = reinterpret_cast<rocksdb::WriteBatch*>(jwb_handle);
+
+ rocksdb::Status s = db->Write(*write_options, wb);
+
+ if (!s.ok()) {
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ }
}
/*
* Class: org_rocksdb_RocksDB
- * Method: get
- * Signature: (JJ[BII[BIIJ)I
+ * Method: write1
+ * Signature: (JJJ)V
*/
-jint Java_org_rocksdb_RocksDB_get__JJ_3BII_3BIIJ(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jropt_handle,
- jbyteArray jkey, jint jkey_off, jint jkey_len, jbyteArray jval,
- jint jval_off, jint jval_len, jlong jcf_handle) {
- auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto& ro_opt = *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle);
- auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
- if (cf_handle != nullptr) {
- bool has_exception = false;
- return rocksdb_get_helper(env, db_handle, ro_opt, cf_handle, jkey, jkey_off,
- jkey_len, jval, jval_off, jval_len,
- &has_exception);
- } else {
- rocksdb::RocksDBExceptionJni::ThrowNew(
- env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
- // will never be evaluated
- return 0;
+void Java_org_rocksdb_RocksDB_write1(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jlong jwrite_options_handle, jlong jwbwi_handle) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ auto* write_options =
+ reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
+ auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle);
+ auto* wb = wbwi->GetWriteBatch();
+
+ rocksdb::Status s = db->Write(*write_options, wb);
+
+ if (!s.ok()) {
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
}
}
//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Delete()
+// rocksdb::DB::Get
-/**
- * @return true if the delete succeeded, false if a Java Exception was thrown
- */
-bool rocksdb_delete_helper(JNIEnv* env, rocksdb::DB* db,
- const rocksdb::WriteOptions& write_options,
- rocksdb::ColumnFamilyHandle* cf_handle,
- jbyteArray jkey, jint jkey_off, jint jkey_len) {
+jbyteArray rocksdb_get_helper(
+ JNIEnv* env, rocksdb::DB* db,
+ const rocksdb::ReadOptions& read_opt,
+ rocksdb::ColumnFamilyHandle* column_family_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len) {
jbyte* key = new jbyte[jkey_len];
env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
if (env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
delete[] key;
- return false;
+ return nullptr;
}
+
rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+ std::string value;
rocksdb::Status s;
- if (cf_handle != nullptr) {
- s = db->Delete(write_options, cf_handle, key_slice);
+ if (column_family_handle != nullptr) {
+ s = db->Get(read_opt, column_family_handle, key_slice, &value);
} else {
// backwards compatibility
- s = db->Delete(write_options, key_slice);
+ s = db->Get(read_opt, key_slice, &value);
}
// cleanup
delete[] key;
+ if (s.IsNotFound()) {
+ return nullptr;
+ }
+
if (s.ok()) {
- return true;
+ jbyteArray jret_value = rocksdb::JniUtil::copyBytes(env, value);
+ if (jret_value == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+ return jret_value;
}
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
- return false;
+ return nullptr;
}
/*
* Class: org_rocksdb_RocksDB
- * Method: delete
- * Signature: (J[BII)V
+ * Method: get
+ * Signature: (J[BII)[B
*/
-void Java_org_rocksdb_RocksDB_delete__J_3BII(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle, jbyteArray jkey,
- jint jkey_off, jint jkey_len) {
- auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- static const rocksdb::WriteOptions default_write_options =
- rocksdb::WriteOptions();
- rocksdb_delete_helper(env, db, default_write_options, nullptr, jkey, jkey_off,
- jkey_len);
+jbyteArray Java_org_rocksdb_RocksDB_get__J_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len) {
+ return rocksdb_get_helper(env, reinterpret_cast<rocksdb::DB*>(jdb_handle),
+ rocksdb::ReadOptions(), nullptr, jkey, jkey_off, jkey_len);
}
/*
* Class: org_rocksdb_RocksDB
- * Method: delete
- * Signature: (J[BIIJ)V
+ * Method: get
+ * Signature: (J[BIIJ)[B
*/
-void Java_org_rocksdb_RocksDB_delete__J_3BIIJ(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle, jbyteArray jkey,
- jint jkey_off, jint jkey_len,
- jlong jcf_handle) {
- auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- static const rocksdb::WriteOptions default_write_options =
- rocksdb::WriteOptions();
- auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+jbyteArray Java_org_rocksdb_RocksDB_get__J_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle) {
+ auto db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ auto cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
if (cf_handle != nullptr) {
- rocksdb_delete_helper(env, db, default_write_options, cf_handle, jkey,
- jkey_off, jkey_len);
+ return rocksdb_get_helper(env, db_handle, rocksdb::ReadOptions(), cf_handle,
+ jkey, jkey_off, jkey_len);
} else {
rocksdb::RocksDBExceptionJni::ThrowNew(
env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
+ return nullptr;
}
}
/*
* Class: org_rocksdb_RocksDB
- * Method: delete
- * Signature: (JJ[BII)V
+ * Method: get
+ * Signature: (JJ[BII)[B
*/
-void Java_org_rocksdb_RocksDB_delete__JJ_3BII(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jlong jwrite_options,
- jbyteArray jkey, jint jkey_off,
- jint jkey_len) {
- auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* write_options =
- reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
- rocksdb_delete_helper(env, db, *write_options, nullptr, jkey, jkey_off,
- jkey_len);
+jbyteArray Java_org_rocksdb_RocksDB_get__JJ_3BII(
+ JNIEnv* env, jobject,
+ jlong jdb_handle, jlong jropt_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len) {
+ return rocksdb_get_helper(
+ env, reinterpret_cast<rocksdb::DB*>(jdb_handle),
+ *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), nullptr, jkey,
+ jkey_off, jkey_len);
}
/*
* Class: org_rocksdb_RocksDB
- * Method: delete
- * Signature: (JJ[BIIJ)V
+ * Method: get
+ * Signature: (JJ[BIIJ)[B
*/
-void Java_org_rocksdb_RocksDB_delete__JJ_3BIIJ(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jwrite_options,
+jbyteArray Java_org_rocksdb_RocksDB_get__JJ_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jropt_handle,
jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle) {
- auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* write_options =
- reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
+ auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ auto& ro_opt = *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
if (cf_handle != nullptr) {
- rocksdb_delete_helper(env, db, *write_options, cf_handle, jkey, jkey_off,
- jkey_len);
+ return rocksdb_get_helper(
+ env, db_handle, ro_opt, cf_handle, jkey, jkey_off, jkey_len);
} else {
rocksdb::RocksDBExceptionJni::ThrowNew(
env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
+ return nullptr;
}
}
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::SingleDelete()
-/**
- * @return true if the single delete succeeded, false if a Java Exception
- * was thrown
- */
-bool rocksdb_single_delete_helper(JNIEnv* env, rocksdb::DB* db,
- const rocksdb::WriteOptions& write_options,
- rocksdb::ColumnFamilyHandle* cf_handle,
- jbyteArray jkey, jint jkey_len) {
- jbyte* key = env->GetByteArrayElements(jkey, nullptr);
- if (key == nullptr) {
+jint rocksdb_get_helper(
+ JNIEnv* env, rocksdb::DB* db, const rocksdb::ReadOptions& read_options,
+ rocksdb::ColumnFamilyHandle* column_family_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len,
+ bool* has_exception) {
+ static const int kNotFound = -1;
+ static const int kStatusError = -2;
+
+ jbyte* key = new jbyte[jkey_len];
+ env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
+ if (env->ExceptionCheck()) {
// exception thrown: OutOfMemoryError
- return false;
+ delete[] key;
+ *has_exception = true;
+ return kStatusError;
}
rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+ // TODO(yhchiang): we might save one memory allocation here by adding
+ // a DB::Get() function which takes preallocated jbyte* as input.
+ std::string cvalue;
rocksdb::Status s;
- if (cf_handle != nullptr) {
- s = db->SingleDelete(write_options, cf_handle, key_slice);
+ if (column_family_handle != nullptr) {
+ s = db->Get(read_options, column_family_handle, key_slice, &cvalue);
} else {
// backwards compatibility
- s = db->SingleDelete(write_options, key_slice);
+ s = db->Get(read_options, key_slice, &cvalue);
}
- // trigger java unref on key and value.
- // by passing JNI_ABORT, it will simply release the reference without
- // copying the result back to the java byte array.
- env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+ // cleanup
+ delete[] key;
- if (s.ok()) {
- return true;
+ if (s.IsNotFound()) {
+ *has_exception = false;
+ return kNotFound;
+ } else if (!s.ok()) {
+ *has_exception = true;
+ // We are throwing a Java exception from the C++ side. C++ has no way
+ // of knowing that a Java exception is now pending, so control flow
+ // does not stop here and the code after this throw still executes.
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+
+ // Return a dummy const value to avoid compilation error, although
+ // java side might not have a chance to get the return value :)
+ return kStatusError;
}
- rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
- return false;
+ const jint cvalue_len = static_cast<jint>(cvalue.size());
+ const jint length = std::min(jval_len, cvalue_len);
+
+ env->SetByteArrayRegion(
+ jval, jval_off, length,
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(cvalue.c_str())));
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ *has_exception = true;
+ return kStatusError;
+ }
+
+ *has_exception = false;
+ return cvalue_len;
}
+
/*
* Class: org_rocksdb_RocksDB
- * Method: singleDelete
- * Signature: (J[BI)V
+ * Method: get
+ * Signature: (J[BII[BII)I
*/
-void Java_org_rocksdb_RocksDB_singleDelete__J_3BI(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jbyteArray jkey,
- jint jkey_len) {
- auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- static const rocksdb::WriteOptions default_write_options =
- rocksdb::WriteOptions();
- rocksdb_single_delete_helper(env, db, default_write_options, nullptr, jkey,
- jkey_len);
+jint Java_org_rocksdb_RocksDB_get__J_3BII_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len) {
+ bool has_exception = false;
+ return rocksdb_get_helper(env, reinterpret_cast<rocksdb::DB*>(jdb_handle),
+ rocksdb::ReadOptions(), nullptr, jkey, jkey_off,
+ jkey_len, jval, jval_off, jval_len, &has_exception);
}
/*
* Class: org_rocksdb_RocksDB
- * Method: singleDelete
- * Signature: (J[BIJ)V
- */
-void Java_org_rocksdb_RocksDB_singleDelete__J_3BIJ(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jbyteArray jkey,
- jint jkey_len,
- jlong jcf_handle) {
- auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- static const rocksdb::WriteOptions default_write_options =
- rocksdb::WriteOptions();
+ * Method: get
+ * Signature: (J[BII[BIIJ)I
+ */
+jint Java_org_rocksdb_RocksDB_get__J_3BII_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len,
+ jlong jcf_handle) {
+ auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
if (cf_handle != nullptr) {
- rocksdb_single_delete_helper(env, db, default_write_options, cf_handle,
- jkey, jkey_len);
+ bool has_exception = false;
+ return rocksdb_get_helper(env, db_handle, rocksdb::ReadOptions(), cf_handle,
+ jkey, jkey_off, jkey_len, jval, jval_off,
+ jval_len, &has_exception);
} else {
rocksdb::RocksDBExceptionJni::ThrowNew(
env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
+ // return value is discarded by the Java side as an exception is pending
+ return 0;
}
}
/*
* Class: org_rocksdb_RocksDB
- * Method: singleDelete
- * Signature: (JJ[BIJ)V
+ * Method: get
+ * Signature: (JJ[BII[BII)I
*/
-void Java_org_rocksdb_RocksDB_singleDelete__JJ_3BI(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jlong jwrite_options,
- jbyteArray jkey,
- jint jkey_len) {
- auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* write_options =
- reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
- rocksdb_single_delete_helper(env, db, *write_options, nullptr, jkey,
- jkey_len);
+jint Java_org_rocksdb_RocksDB_get__JJ_3BII_3BII(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jropt_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len) {
+ bool has_exception = false;
+ return rocksdb_get_helper(
+ env, reinterpret_cast<rocksdb::DB*>(jdb_handle),
+ *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), nullptr, jkey,
+ jkey_off, jkey_len, jval, jval_off, jval_len, &has_exception);
}
/*
* Class: org_rocksdb_RocksDB
- * Method: singleDelete
- * Signature: (JJ[BIJ)V
+ * Method: get
+ * Signature: (JJ[BII[BIIJ)I
*/
-void Java_org_rocksdb_RocksDB_singleDelete__JJ_3BIJ(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jwrite_options,
- jbyteArray jkey, jint jkey_len, jlong jcf_handle) {
- auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* write_options =
- reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
+jint Java_org_rocksdb_RocksDB_get__JJ_3BII_3BIIJ(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jropt_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jbyteArray jval, jint jval_off, jint jval_len,
+ jlong jcf_handle) {
+ auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ auto& ro_opt = *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
if (cf_handle != nullptr) {
- rocksdb_single_delete_helper(env, db, *write_options, cf_handle, jkey,
- jkey_len);
+ bool has_exception = false;
+ return rocksdb_get_helper(env, db_handle, ro_opt, cf_handle,
+ jkey, jkey_off, jkey_len,
+ jval, jval_off, jval_len,
+ &has_exception);
} else {
rocksdb::RocksDBExceptionJni::ThrowNew(
env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
+ // return value is discarded by the Java side as an exception is pending
+ return 0;
}
}
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::DeleteRange()
+inline void multi_get_helper_release_keys(
+ JNIEnv* env, std::vector<std::pair<jbyte*, jobject>>& keys_to_free) {
+ auto end = keys_to_free.end();
+ for (auto it = keys_to_free.begin(); it != end; ++it) {
+ delete[] it->first;
+ env->DeleteLocalRef(it->second);
+ }
+ keys_to_free.clear();
+}
+
/**
- * @return true if the delete range succeeded, false if a Java Exception
- * was thrown
+ * Multi-get implementation, optionally across explicit column families.
+ *
+ * @return byte[][] of values or nullptr if an exception occurs
*/
-bool rocksdb_delete_range_helper(JNIEnv* env, rocksdb::DB* db,
- const rocksdb::WriteOptions& write_options,
- rocksdb::ColumnFamilyHandle* cf_handle,
- jbyteArray jbegin_key, jint jbegin_key_off,
- jint jbegin_key_len, jbyteArray jend_key,
- jint jend_key_off, jint jend_key_len) {
- jbyte* begin_key = new jbyte[jbegin_key_len];
- env->GetByteArrayRegion(jbegin_key, jbegin_key_off, jbegin_key_len,
- begin_key);
- if (env->ExceptionCheck()) {
- // exception thrown: ArrayIndexOutOfBoundsException
- delete[] begin_key;
- return false;
+jobjectArray multi_get_helper(
+ JNIEnv* env, jobject, rocksdb::DB* db, const rocksdb::ReadOptions& rOpt,
+ jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens,
+ jlongArray jcolumn_family_handles) {
+ std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
+ if (jcolumn_family_handles != nullptr) {
+ const jsize len_cols = env->GetArrayLength(jcolumn_family_handles);
+
+ jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr);
+ if (jcfh == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ for (jsize i = 0; i < len_cols; i++) {
+ auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcfh[i]);
+ cf_handles.push_back(cf_handle);
+ }
+ env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT);
}
- rocksdb::Slice begin_key_slice(reinterpret_cast<char*>(begin_key),
- jbegin_key_len);
- jbyte* end_key = new jbyte[jend_key_len];
- env->GetByteArrayRegion(jend_key, jend_key_off, jend_key_len, end_key);
- if (env->ExceptionCheck()) {
- // exception thrown: ArrayIndexOutOfBoundsException
- delete[] begin_key;
- delete[] end_key;
- return false;
+ const jsize len_keys = env->GetArrayLength(jkeys);
+ if (env->EnsureLocalCapacity(len_keys) != 0) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
}
- rocksdb::Slice end_key_slice(reinterpret_cast<char*>(end_key), jend_key_len);
- rocksdb::Status s =
- db->DeleteRange(write_options, cf_handle, begin_key_slice, end_key_slice);
+ jint* jkey_off = env->GetIntArrayElements(jkey_offs, nullptr);
+ if (jkey_off == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
- // cleanup
- delete[] begin_key;
- delete[] end_key;
+ jint* jkey_len = env->GetIntArrayElements(jkey_lens, nullptr);
+ if (jkey_len == nullptr) {
+ // exception thrown: OutOfMemoryError
+ env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
+ return nullptr;
+ }
- if (s.ok()) {
- return true;
+ std::vector<rocksdb::Slice> keys;
+ std::vector<std::pair<jbyte*, jobject>> keys_to_free;
+ for (jsize i = 0; i < len_keys; i++) {
+ jobject jkey = env->GetObjectArrayElement(jkeys, i);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
+ env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
+ multi_get_helper_release_keys(env, keys_to_free);
+ return nullptr;
+ }
+
+ jbyteArray jkey_ba = reinterpret_cast<jbyteArray>(jkey);
+
+ const jint len_key = jkey_len[i];
+ jbyte* key = new jbyte[len_key];
+ env->GetByteArrayRegion(jkey_ba, jkey_off[i], len_key, key);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ delete[] key;
+ env->DeleteLocalRef(jkey);
+ env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
+ env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
+ multi_get_helper_release_keys(env, keys_to_free);
+ return nullptr;
+ }
+
+ rocksdb::Slice key_slice(reinterpret_cast<char*>(key), len_key);
+ keys.push_back(key_slice);
+
+ keys_to_free.push_back(std::pair<jbyte*, jobject>(key, jkey));
}
- rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
- return false;
+ // cleanup jkey_off and jkey_len
+ env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
+ env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
+
+ std::vector<std::string> values;
+ std::vector<rocksdb::Status> s;
+ if (cf_handles.size() == 0) {
+ s = db->MultiGet(rOpt, keys, &values);
+ } else {
+ s = db->MultiGet(rOpt, cf_handles, keys, &values);
+ }
+
+ // free up allocated byte arrays
+ multi_get_helper_release_keys(env, keys_to_free);
+
+ // prepare the results
+ jobjectArray jresults =
+ rocksdb::ByteJni::new2dByteArray(env, static_cast<jsize>(s.size()));
+ if (jresults == nullptr) {
+ // exception occurred
+ return nullptr;
+ }
+
+ // TODO(AR) it is not clear to me why EnsureLocalCapacity is needed for the
+ // loop as we cleanup references with env->DeleteLocalRef(jentry_value);
+ if (env->EnsureLocalCapacity(static_cast<jint>(s.size())) != 0) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ // add to the jresults
+ for (std::vector<rocksdb::Status>::size_type i = 0; i != s.size(); i++) {
+ if (s[i].ok()) {
+ std::string* value = &values[i];
+ const jsize jvalue_len = static_cast<jsize>(value->size());
+ jbyteArray jentry_value = env->NewByteArray(jvalue_len);
+ if (jentry_value == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ env->SetByteArrayRegion(
+ jentry_value, 0, static_cast<jsize>(jvalue_len),
+ const_cast<jbyte*>(reinterpret_cast<const jbyte*>(value->c_str())));
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jentry_value);
+ return nullptr;
+ }
+
+ env->SetObjectArrayElement(jresults, static_cast<jsize>(i), jentry_value);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jentry_value);
+ return nullptr;
+ }
+
+ env->DeleteLocalRef(jentry_value);
+ }
+ }
+
+ return jresults;
}
/*
* Class: org_rocksdb_RocksDB
- * Method: deleteRange
- * Signature: (J[BII[BII)V
+ * Method: multiGet
+ * Signature: (J[[B[I[I)[[B
*/
-void Java_org_rocksdb_RocksDB_deleteRange__J_3BII_3BII(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jbyteArray jbegin_key,
- jint jbegin_key_off, jint jbegin_key_len, jbyteArray jend_key,
- jint jend_key_off, jint jend_key_len) {
- auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- static const rocksdb::WriteOptions default_write_options =
- rocksdb::WriteOptions();
- rocksdb_delete_range_helper(env, db, default_write_options, nullptr,
- jbegin_key, jbegin_key_off, jbegin_key_len,
- jend_key, jend_key_off, jend_key_len);
+jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B_3I_3I(
+ JNIEnv* env, jobject jdb, jlong jdb_handle,
+ jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens) {
+ return multi_get_helper(env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
+ rocksdb::ReadOptions(), jkeys, jkey_offs, jkey_lens,
+ nullptr);
}
/*
* Class: org_rocksdb_RocksDB
- * Method: deleteRange
- * Signature: (J[BII[BIIJ)V
+ * Method: multiGet
+ * Signature: (J[[B[I[I[J)[[B
*/
-void Java_org_rocksdb_RocksDB_deleteRange__J_3BII_3BIIJ(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jbyteArray jbegin_key,
- jint jbegin_key_off, jint jbegin_key_len, jbyteArray jend_key,
- jint jend_key_off, jint jend_key_len, jlong jcf_handle) {
- auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- static const rocksdb::WriteOptions default_write_options =
- rocksdb::WriteOptions();
- auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
- if (cf_handle != nullptr) {
- rocksdb_delete_range_helper(env, db, default_write_options, cf_handle,
- jbegin_key, jbegin_key_off, jbegin_key_len,
- jend_key, jend_key_off, jend_key_len);
- } else {
- rocksdb::RocksDBExceptionJni::ThrowNew(
- env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
- }
+jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B_3I_3I_3J(
+ JNIEnv* env, jobject jdb, jlong jdb_handle,
+ jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens,
+ jlongArray jcolumn_family_handles) {
+ return multi_get_helper(env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
+ rocksdb::ReadOptions(), jkeys, jkey_offs, jkey_lens,
+ jcolumn_family_handles);
}
/*
* Class: org_rocksdb_RocksDB
- * Method: deleteRange
- * Signature: (JJ[BII[BII)V
+ * Method: multiGet
+ * Signature: (JJ[[B[I[I)[[B
*/
-void Java_org_rocksdb_RocksDB_deleteRange__JJ_3BII_3BII(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jwrite_options,
- jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
- jbyteArray jend_key, jint jend_key_off, jint jend_key_len) {
- auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* write_options =
- reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
- rocksdb_delete_range_helper(env, db, *write_options, nullptr, jbegin_key,
- jbegin_key_off, jbegin_key_len, jend_key,
- jend_key_off, jend_key_len);
+jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B_3I_3I(
+ JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
+ jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens) {
+ return multi_get_helper(
+ env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
+ *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), jkeys, jkey_offs,
+ jkey_lens, nullptr);
}
/*
- * Class: org_rocksdb_RocksDB
- * Method: deleteRange
- * Signature: (JJ[BII[BIIJ)V
+ * Class: org_rocksdb_RocksDB
+ * Method: multiGet
+ * Signature: (JJ[[B[I[I[J)[[B
*/
-void Java_org_rocksdb_RocksDB_deleteRange__JJ_3BII_3BIIJ(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jwrite_options,
- jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len,
- jbyteArray jend_key, jint jend_key_off, jint jend_key_len,
- jlong jcf_handle) {
- auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* write_options =
- reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options);
- auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
- if (cf_handle != nullptr) {
- rocksdb_delete_range_helper(env, db, *write_options, cf_handle, jbegin_key,
- jbegin_key_off, jbegin_key_len, jend_key,
- jend_key_off, jend_key_len);
- } else {
- rocksdb::RocksDBExceptionJni::ThrowNew(
- env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
- }
+jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B_3I_3I_3J(
+ JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
+ jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens,
+ jlongArray jcolumn_family_handles) {
+ return multi_get_helper(
+ env, jdb, reinterpret_cast<rocksdb::DB*>(jdb_handle),
+ *reinterpret_cast<rocksdb::ReadOptions*>(jropt_handle), jkeys, jkey_offs,
+ jkey_lens, jcolumn_family_handles);
}
//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Merge
-
-/**
- * @return true if the merge succeeded, false if a Java Exception was thrown
- */
-bool rocksdb_merge_helper(JNIEnv* env, rocksdb::DB* db,
- const rocksdb::WriteOptions& write_options,
- rocksdb::ColumnFamilyHandle* cf_handle,
- jbyteArray jkey, jint jkey_off, jint jkey_len,
- jbyteArray jval, jint jval_off, jint jval_len) {
+// rocksdb::DB::KeyMayExist
+jboolean key_may_exist_helper(JNIEnv* env, rocksdb::DB* db,
+ const rocksdb::ReadOptions& read_opt,
+ rocksdb::ColumnFamilyHandle* cf_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jobject jstring_builder, bool* has_exception) {
jbyte* key = new jbyte[jkey_len];
env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
if (env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
delete[] key;
+ *has_exception = true;
return false;
}
- rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
- jbyte* value = new jbyte[jval_len];
- env->GetByteArrayRegion(jval, jval_off, jval_len, value);
- if (env->ExceptionCheck()) {
- // exception thrown: ArrayIndexOutOfBoundsException
- delete[] value;
- delete[] key;
- return false;
- }
- rocksdb::Slice value_slice(reinterpret_cast<char*>(value), jval_len);
+ rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
- rocksdb::Status s;
+ std::string value;
+ bool value_found = false;
+ bool keyMayExist;
if (cf_handle != nullptr) {
- s = db->Merge(write_options, cf_handle, key_slice, value_slice);
+ keyMayExist =
+ db->KeyMayExist(read_opt, cf_handle, key_slice, &value, &value_found);
} else {
- s = db->Merge(write_options, key_slice, value_slice);
+ keyMayExist = db->KeyMayExist(read_opt, key_slice, &value, &value_found);
}
// cleanup
- delete[] value;
delete[] key;
- if (s.ok()) {
- return true;
+ // extract the value
+ if (value_found && !value.empty()) {
+ jobject jresult_string_builder =
+ rocksdb::StringBuilderJni::append(env, jstring_builder, value.c_str());
+ if (jresult_string_builder == nullptr) {
+ *has_exception = true;
+ return false;
+ }
}
- rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
- return false;
+ *has_exception = false;
+ return static_cast<jboolean>(keyMayExist);
}
/*
* Class: org_rocksdb_RocksDB
- * Method: merge
- * Signature: (J[BII[BII)V
+ * Method: keyMayExist
+ * Signature: (J[BIILjava/lang/StringBuilder;)Z
*/
-void Java_org_rocksdb_RocksDB_merge__J_3BII_3BII(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jbyteArray jkey, jint jkey_off,
- jint jkey_len, jbyteArray jval,
- jint jval_off, jint jval_len) {
+jboolean Java_org_rocksdb_RocksDB_keyMayExist__J_3BIILjava_lang_StringBuilder_2(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len, jobject jstring_builder) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- static const rocksdb::WriteOptions default_write_options =
- rocksdb::WriteOptions();
-
- rocksdb_merge_helper(env, db, default_write_options, nullptr, jkey, jkey_off,
- jkey_len, jval, jval_off, jval_len);
+ bool has_exception = false;
+ return key_may_exist_helper(env, db, rocksdb::ReadOptions(), nullptr, jkey,
+ jkey_off, jkey_len, jstring_builder, &has_exception);
}
/*
* Class: org_rocksdb_RocksDB
- * Method: merge
- * Signature: (J[BII[BIIJ)V
+ * Method: keyMayExist
+ * Signature: (J[BIIJLjava/lang/StringBuilder;)Z
*/
-void Java_org_rocksdb_RocksDB_merge__J_3BII_3BIIJ(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jbyteArray jkey,
- jint jkey_off, jint jkey_len, jbyteArray jval, jint jval_off, jint jval_len,
- jlong jcf_handle) {
+jboolean
+Java_org_rocksdb_RocksDB_keyMayExist__J_3BIIJLjava_lang_StringBuilder_2(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len,
+ jlong jcf_handle, jobject jstring_builder) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- static const rocksdb::WriteOptions default_write_options =
- rocksdb::WriteOptions();
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
if (cf_handle != nullptr) {
- rocksdb_merge_helper(env, db, default_write_options, cf_handle, jkey,
- jkey_off, jkey_len, jval, jval_off, jval_len);
+ bool has_exception = false;
+ return key_may_exist_helper(env, db, rocksdb::ReadOptions(), cf_handle,
+ jkey, jkey_off, jkey_len, jstring_builder,
+ &has_exception);
} else {
rocksdb::RocksDBExceptionJni::ThrowNew(
env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
+ return true;
}
}
/*
* Class: org_rocksdb_RocksDB
- * Method: merge
- * Signature: (JJ[BII[BII)V
+ * Method: keyMayExist
+ * Signature: (JJ[BIILjava/lang/StringBuilder;)Z
*/
-void Java_org_rocksdb_RocksDB_merge__JJ_3BII_3BII(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jwrite_options_handle,
- jbyteArray jkey, jint jkey_off, jint jkey_len, jbyteArray jval,
- jint jval_off, jint jval_len) {
+jboolean
+Java_org_rocksdb_RocksDB_keyMayExist__JJ_3BIILjava_lang_StringBuilder_2(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jread_options_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len, jobject jstring_builder) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* write_options =
- reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
-
- rocksdb_merge_helper(env, db, *write_options, nullptr, jkey, jkey_off,
- jkey_len, jval, jval_off, jval_len);
+ auto& read_options =
+ *reinterpret_cast<rocksdb::ReadOptions*>(jread_options_handle);
+ bool has_exception = false;
+ return key_may_exist_helper(env, db, read_options, nullptr, jkey, jkey_off,
+ jkey_len, jstring_builder, &has_exception);
}
/*
* Class: org_rocksdb_RocksDB
- * Method: merge
- * Signature: (JJ[BII[BIIJ)V
+ * Method: keyMayExist
+ * Signature: (JJ[BIIJLjava/lang/StringBuilder;)Z
*/
-void Java_org_rocksdb_RocksDB_merge__JJ_3BII_3BIIJ(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jwrite_options_handle,
- jbyteArray jkey, jint jkey_off, jint jkey_len, jbyteArray jval,
- jint jval_off, jint jval_len, jlong jcf_handle) {
+jboolean
+Java_org_rocksdb_RocksDB_keyMayExist__JJ_3BIIJLjava_lang_StringBuilder_2(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jread_options_handle,
+ jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle,
+ jobject jstring_builder) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* write_options =
- reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
+ auto& read_options =
+ *reinterpret_cast<rocksdb::ReadOptions*>(jread_options_handle);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
if (cf_handle != nullptr) {
- rocksdb_merge_helper(env, db, *write_options, cf_handle, jkey, jkey_off,
- jkey_len, jval, jval_off, jval_len);
+ bool has_exception = false;
+ return key_may_exist_helper(env, db, read_options, cf_handle, jkey,
+ jkey_off, jkey_len, jstring_builder, &has_exception);
} else {
rocksdb::RocksDBExceptionJni::ThrowNew(
env, rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle."));
+ return true;
}
}
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::~DB()
-
-/*
- * Class: org_rocksdb_RocksDB
- * Method: disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_RocksDB_disposeInternal(JNIEnv* /*env*/,
- jobject /*java_db*/,
- jlong jhandle) {
- auto* db = reinterpret_cast<rocksdb::DB*>(jhandle);
- assert(db != nullptr);
- delete db;
-}
-
-jlong rocksdb_iterator_helper(rocksdb::DB* db,
- rocksdb::ReadOptions read_options,
- rocksdb::ColumnFamilyHandle* cf_handle) {
- rocksdb::Iterator* iterator = nullptr;
- if (cf_handle != nullptr) {
- iterator = db->NewIterator(read_options, cf_handle);
- } else {
- iterator = db->NewIterator(read_options);
- }
- return reinterpret_cast<jlong>(iterator);
-}
-
/*
* Class: org_rocksdb_RocksDB
* Method: iterator
* Signature: (J)J
*/
-jlong Java_org_rocksdb_RocksDB_iterator__J(JNIEnv* /*env*/, jobject /*jdb*/,
- jlong db_handle) {
+jlong Java_org_rocksdb_RocksDB_iterator__J(
+ JNIEnv*, jobject, jlong db_handle) {
auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
return rocksdb_iterator_helper(db, rocksdb::ReadOptions(), nullptr);
}
* Method: iterator
* Signature: (JJ)J
*/
-jlong Java_org_rocksdb_RocksDB_iterator__JJ(JNIEnv* /*env*/, jobject /*jdb*/,
- jlong db_handle,
- jlong jread_options_handle) {
+jlong Java_org_rocksdb_RocksDB_iterator__JJ(
+ JNIEnv*, jobject, jlong db_handle, jlong jread_options_handle) {
auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
auto& read_options =
*reinterpret_cast<rocksdb::ReadOptions*>(jread_options_handle);
* Method: iteratorCF
* Signature: (JJ)J
*/
-jlong Java_org_rocksdb_RocksDB_iteratorCF__JJ(JNIEnv* /*env*/, jobject /*jdb*/,
- jlong db_handle,
- jlong jcf_handle) {
+jlong Java_org_rocksdb_RocksDB_iteratorCF__JJ(
+ JNIEnv*, jobject, jlong db_handle, jlong jcf_handle) {
auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
return rocksdb_iterator_helper(db, rocksdb::ReadOptions(), cf_handle);
* Method: iteratorCF
* Signature: (JJJ)J
*/
-jlong Java_org_rocksdb_RocksDB_iteratorCF__JJJ(JNIEnv* /*env*/, jobject /*jdb*/,
- jlong db_handle,
- jlong jcf_handle,
- jlong jread_options_handle) {
+jlong Java_org_rocksdb_RocksDB_iteratorCF__JJJ(
+ JNIEnv*, jobject,
+ jlong db_handle, jlong jcf_handle, jlong jread_options_handle) {
auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
auto& read_options =
* Method: iterators
* Signature: (J[JJ)[J
*/
-jlongArray Java_org_rocksdb_RocksDB_iterators(JNIEnv* env, jobject /*jdb*/,
- jlong db_handle,
- jlongArray jcolumn_family_handles,
- jlong jread_options_handle) {
+jlongArray Java_org_rocksdb_RocksDB_iterators(
+ JNIEnv* env, jobject, jlong db_handle,
+ jlongArray jcolumn_family_handles,
+ jlong jread_options_handle) {
auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
auto& read_options =
*reinterpret_cast<rocksdb::ReadOptions*>(jread_options_handle);
}
}
-/*
- * Class: org_rocksdb_RocksDB
- * Method: getDefaultColumnFamily
- * Signature: (J)J
- */
-jlong Java_org_rocksdb_RocksDB_getDefaultColumnFamily(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jdb_handle) {
- auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* cf_handle = db_handle->DefaultColumnFamily();
- return reinterpret_cast<jlong>(cf_handle);
-}
-
-/*
- * Class: org_rocksdb_RocksDB
- * Method: createColumnFamily
- * Signature: (J[BJ)J
- */
-jlong Java_org_rocksdb_RocksDB_createColumnFamily(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jbyteArray jcolumn_name,
- jlong jcolumn_options) {
- rocksdb::ColumnFamilyHandle* handle;
- jboolean has_exception = JNI_FALSE;
- std::string column_name = rocksdb::JniUtil::byteString<std::string>(
- env, jcolumn_name,
- [](const char* str, const size_t len) { return std::string(str, len); },
- &has_exception);
- if (has_exception == JNI_TRUE) {
- // exception occurred
- return 0;
- }
-
- auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* cfOptions =
- reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jcolumn_options);
-
- rocksdb::Status s =
- db_handle->CreateColumnFamily(*cfOptions, column_name, &handle);
-
- if (s.ok()) {
- return reinterpret_cast<jlong>(handle);
- }
-
- rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
- return 0;
-}
-
-/*
- * Class: org_rocksdb_RocksDB
- * Method: dropColumnFamily
- * Signature: (JJ)V;
- */
-void Java_org_rocksdb_RocksDB_dropColumnFamily(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jlong jcf_handle) {
- auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
- rocksdb::Status s = db_handle->DropColumnFamily(cf_handle);
- if (!s.ok()) {
- rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
- }
-}
-
-/*
- * Class: org_rocksdb_RocksDB
- * Method: dropColumnFamilies
- * Signature: (J[J)V
- */
-void Java_org_rocksdb_RocksDB_dropColumnFamilies(
- JNIEnv* env, jobject, jlong jdb_handle, jlongArray jcolumn_family_handles) {
- auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
-
- std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
- if (jcolumn_family_handles != nullptr) {
- const jsize len_cols = env->GetArrayLength(jcolumn_family_handles);
-
- jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr);
- if (jcfh == nullptr) {
- // exception thrown: OutOfMemoryError
- return;
- }
-
- for (jsize i = 0; i < len_cols; i++) {
- auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcfh[i]);
- cf_handles.push_back(cf_handle);
- }
- env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT);
- }
-
- rocksdb::Status s = db_handle->DropColumnFamilies(cf_handles);
- if (!s.ok()) {
- rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
- }
-}
-
/*
* Method: getSnapshot
* Signature: (J)J
*/
-jlong Java_org_rocksdb_RocksDB_getSnapshot(JNIEnv* /*env*/, jobject /*jdb*/,
- jlong db_handle) {
+jlong Java_org_rocksdb_RocksDB_getSnapshot(
+ JNIEnv*, jobject, jlong db_handle) {
auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
const rocksdb::Snapshot* snapshot = db->GetSnapshot();
return reinterpret_cast<jlong>(snapshot);
* Method: releaseSnapshot
* Signature: (JJ)V
*/
-void Java_org_rocksdb_RocksDB_releaseSnapshot(JNIEnv* /*env*/, jobject /*jdb*/,
- jlong db_handle,
- jlong snapshot_handle) {
+void Java_org_rocksdb_RocksDB_releaseSnapshot(
+ JNIEnv*, jobject, jlong db_handle,
+ jlong snapshot_handle) {
auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
auto* snapshot = reinterpret_cast<rocksdb::Snapshot*>(snapshot_handle);
db->ReleaseSnapshot(snapshot);
/*
* Class: org_rocksdb_RocksDB
- * Method: getProperty0
- * Signature: (JLjava/lang/String;I)Ljava/lang/String;
+ * Method: getProperty
+ * Signature: (JJLjava/lang/String;I)Ljava/lang/String;
*/
-jstring Java_org_rocksdb_RocksDB_getProperty0__JLjava_lang_String_2I(
- JNIEnv* env, jobject /*jdb*/, jlong db_handle, jstring jproperty,
- jint jproperty_len) {
+jstring Java_org_rocksdb_RocksDB_getProperty(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
+ jstring jproperty, jint jproperty_len) {
const char* property = env->GetStringUTFChars(jproperty, nullptr);
if (property == nullptr) {
// exception thrown: OutOfMemoryError
return nullptr;
}
- rocksdb::Slice property_slice(property, jproperty_len);
+ rocksdb::Slice property_name(property, jproperty_len);
+
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ rocksdb::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+ }
- auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
std::string property_value;
- bool retCode = db->GetProperty(property_slice, &property_value);
+ bool retCode = db->GetProperty(cf_handle, property_name, &property_value);
env->ReleaseStringUTFChars(jproperty, property);
if (retCode) {
/*
* Class: org_rocksdb_RocksDB
- * Method: getProperty0
- * Signature: (JJLjava/lang/String;I)Ljava/lang/String;
+ * Method: getMapProperty
+ * Signature: (JJLjava/lang/String;I)Ljava/util/Map;
*/
-jstring Java_org_rocksdb_RocksDB_getProperty0__JJLjava_lang_String_2I(
- JNIEnv* env, jobject /*jdb*/, jlong db_handle, jlong jcf_handle,
+jobject Java_org_rocksdb_RocksDB_getMapProperty(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
jstring jproperty, jint jproperty_len) {
- const char* property = env->GetStringUTFChars(jproperty, nullptr);
+ const char* property = env->GetStringUTFChars(jproperty, nullptr);
if (property == nullptr) {
// exception thrown: OutOfMemoryError
return nullptr;
}
- rocksdb::Slice property_slice(property, jproperty_len);
-
- auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
- auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
- std::string property_value;
- bool retCode = db->GetProperty(cf_handle, property_slice, &property_value);
- env->ReleaseStringUTFChars(jproperty, property);
-
- if (retCode) {
- return env->NewStringUTF(property_value.c_str());
- }
-
- rocksdb::RocksDBExceptionJni::ThrowNew(env, rocksdb::Status::NotFound());
- return nullptr;
-}
-
-/*
- * Class: org_rocksdb_RocksDB
- * Method: getLongProperty
- * Signature: (JLjava/lang/String;I)L;
- */
-jlong Java_org_rocksdb_RocksDB_getLongProperty__JLjava_lang_String_2I(
- JNIEnv* env, jobject /*jdb*/, jlong db_handle, jstring jproperty,
- jint jproperty_len) {
- const char* property = env->GetStringUTFChars(jproperty, nullptr);
- if (property == nullptr) {
- // exception thrown: OutOfMemoryError
- return 0;
+ rocksdb::Slice property_name(property, jproperty_len);
+
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ rocksdb::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
}
- rocksdb::Slice property_slice(property, jproperty_len);
- auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
- uint64_t property_value = 0;
- bool retCode = db->GetIntProperty(property_slice, &property_value);
+ std::map<std::string, std::string> property_value;
+ bool retCode = db->GetMapProperty(cf_handle, property_name, &property_value);
env->ReleaseStringUTFChars(jproperty, property);
if (retCode) {
- return property_value;
+ return rocksdb::HashMapJni::fromCppMap(env, &property_value);
}
rocksdb::RocksDBExceptionJni::ThrowNew(env, rocksdb::Status::NotFound());
- return 0;
+ return nullptr;
}
/*
* Class: org_rocksdb_RocksDB
* Method: getLongProperty
- * Signature: (JJLjava/lang/String;I)L;
+ * Signature: (JJLjava/lang/String;I)J
*/
-jlong Java_org_rocksdb_RocksDB_getLongProperty__JJLjava_lang_String_2I(
- JNIEnv* env, jobject /*jdb*/, jlong db_handle, jlong jcf_handle,
+jlong Java_org_rocksdb_RocksDB_getLongProperty(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
jstring jproperty, jint jproperty_len) {
const char* property = env->GetStringUTFChars(jproperty, nullptr);
if (property == nullptr) {
// exception thrown: OutOfMemoryError
return 0;
}
- rocksdb::Slice property_slice(property, jproperty_len);
+ rocksdb::Slice property_name(property, jproperty_len);
+
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ rocksdb::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+ }
- auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
- auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
uint64_t property_value;
- bool retCode = db->GetIntProperty(cf_handle, property_slice, &property_value);
+ bool retCode = db->GetIntProperty(cf_handle, property_name, &property_value);
env->ReleaseStringUTFChars(jproperty, property);
if (retCode) {
return 0;
}
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: resetStats
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_resetStats(
+ JNIEnv *, jobject, jlong jdb_handle) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ db->ResetStats();
+}
+
/*
* Class: org_rocksdb_RocksDB
* Method: getAggregatedLongProperty
* Signature: (JLjava/lang/String;I)J
*/
jlong Java_org_rocksdb_RocksDB_getAggregatedLongProperty(
- JNIEnv* env, jobject, jlong db_handle, jstring jproperty, jint jproperty_len) {
+ JNIEnv* env, jobject, jlong db_handle,
+ jstring jproperty, jint jproperty_len) {
const char* property = env->GetStringUTFChars(jproperty, nullptr);
if (property == nullptr) {
return 0;
}
- rocksdb::Slice property_slice(property, jproperty_len);
+ rocksdb::Slice property_name(property, jproperty_len);
auto* db = reinterpret_cast<rocksdb::DB*>(db_handle);
uint64_t property_value = 0;
- bool retCode = db->GetAggregatedIntProperty(property_slice, &property_value);
+ bool retCode = db->GetAggregatedIntProperty(property_name, &property_value);
env->ReleaseStringUTFChars(jproperty, property);
if (retCode) {
return 0;
}
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getApproximateSizes
+ * Signature: (JJ[JB)[J
+ */
+jlongArray Java_org_rocksdb_RocksDB_getApproximateSizes(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
+ jlongArray jrange_slice_handles, jbyte jinclude_flags) {
+ const jsize jlen = env->GetArrayLength(jrange_slice_handles);
+ const size_t range_count = jlen / 2;
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::Flush
+ jboolean jranges_is_copy = JNI_FALSE;
+ jlong* jranges = env->GetLongArrayElements(jrange_slice_handles,
+ &jranges_is_copy);
+ if (jranges == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
-void rocksdb_flush_helper(JNIEnv* env, rocksdb::DB* db,
- const rocksdb::FlushOptions& flush_options,
- rocksdb::ColumnFamilyHandle* column_family_handle) {
- rocksdb::Status s;
- if (column_family_handle != nullptr) {
- s = db->Flush(flush_options, column_family_handle);
+ auto ranges = std::unique_ptr<rocksdb::Range[]>(
+ new rocksdb::Range[range_count]);
+  for (size_t i = 0; i < range_count; ++i) {
+    auto* start = reinterpret_cast<rocksdb::Slice*>(jranges[i * 2]);
+    auto* limit = reinterpret_cast<rocksdb::Slice*>(jranges[i * 2 + 1]);
+    ranges.get()[i] = rocksdb::Range(*start, *limit);
+  }
+
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ rocksdb::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
} else {
- s = db->Flush(flush_options);
+ cf_handle =
+ reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
}
- if (!s.ok()) {
- rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+
+ auto sizes = std::unique_ptr<uint64_t[]>(new uint64_t[range_count]);
+ db->GetApproximateSizes(cf_handle, ranges.get(),
+ static_cast<int>(range_count), sizes.get(),
+ static_cast<uint8_t>(jinclude_flags));
+
+ // release LongArrayElements
+ env->ReleaseLongArrayElements(jrange_slice_handles, jranges, JNI_ABORT);
+
+ // prepare results
+ auto results = std::unique_ptr<jlong[]>(new jlong[range_count]);
+ for (size_t i = 0; i < range_count; ++i) {
+ results.get()[i] = static_cast<jlong>(sizes.get()[i]);
+ }
+
+ const jsize jrange_count = jlen / 2;
+ jlongArray jresults = env->NewLongArray(jrange_count);
+ if (jresults == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ env->SetLongArrayRegion(jresults, 0, jrange_count, results.get());
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jresults);
+ return nullptr;
}
+
+ return jresults;
}
/*
* Class: org_rocksdb_RocksDB
- * Method: flush
- * Signature: (JJ)V
+ * Method: getApproximateMemTableStats
+ * Signature: (JJJJ)[J
*/
-void Java_org_rocksdb_RocksDB_flush__JJ(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jlong jflush_options) {
+jlongArray Java_org_rocksdb_RocksDB_getApproximateMemTableStats(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
+ jlong jstartHandle, jlong jlimitHandle) {
+ auto* start = reinterpret_cast<rocksdb::Slice*>(jstartHandle);
+  auto* limit = reinterpret_cast<rocksdb::Slice*>(jlimitHandle);
+ const rocksdb::Range range(*start, *limit);
+
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* flush_options =
- reinterpret_cast<rocksdb::FlushOptions*>(jflush_options);
- rocksdb_flush_helper(env, db, *flush_options, nullptr);
+ rocksdb::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+ }
+
+ uint64_t count = 0;
+ uint64_t sizes = 0;
+ db->GetApproximateMemTableStats(cf_handle, range, &count, &sizes);
+
+ // prepare results
+ jlong results[2] = {
+ static_cast<jlong>(count),
+ static_cast<jlong>(sizes)};
+
+  const jsize jcount = 2;  // results always holds exactly {count, sizes}
+ jlongArray jsizes = env->NewLongArray(jcount);
+ if (jsizes == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+
+ env->SetLongArrayRegion(jsizes, 0, jcount, results);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jsizes);
+ return nullptr;
+ }
+
+ return jsizes;
}
/*
* Class: org_rocksdb_RocksDB
- * Method: flush
- * Signature: (JJJ)V
+ * Method: compactRange
+ * Signature: (J[BI[BIJJ)V
*/
-void Java_org_rocksdb_RocksDB_flush__JJJ(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle, jlong jflush_options,
- jlong jcf_handle) {
+void Java_org_rocksdb_RocksDB_compactRange(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jbyteArray jbegin, jint jbegin_len,
+ jbyteArray jend, jint jend_len,
+ jlong jcompact_range_opts_handle,
+ jlong jcf_handle) {
+ jboolean has_exception = JNI_FALSE;
+
+ std::string str_begin;
+ if (jbegin_len > 0) {
+ str_begin = rocksdb::JniUtil::byteString<std::string>(env, jbegin, jbegin_len,
+ [](const char* str, const size_t len) {
+ return std::string(str, len);
+ },
+ &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return;
+ }
+ }
+
+ std::string str_end;
+ if (jend_len > 0) {
+ str_end = rocksdb::JniUtil::byteString<std::string>(env, jend, jend_len,
+ [](const char* str, const size_t len) {
+ return std::string(str, len);
+ },
+ &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return;
+ }
+ }
+
+ rocksdb::CompactRangeOptions *compact_range_opts = nullptr;
+ if (jcompact_range_opts_handle == 0) {
+ // NOTE: we DO own the pointer!
+ compact_range_opts = new rocksdb::CompactRangeOptions();
+ } else {
+ // NOTE: we do NOT own the pointer!
+ compact_range_opts =
+ reinterpret_cast<rocksdb::CompactRangeOptions*>(jcompact_range_opts_handle);
+ }
+
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* flush_options =
- reinterpret_cast<rocksdb::FlushOptions*>(jflush_options);
- auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
- rocksdb_flush_helper(env, db, *flush_options, cf_handle);
-}
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::CompactRange - Full
+ rocksdb::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+ }
-void rocksdb_compactrange_helper(JNIEnv* env, rocksdb::DB* db,
- rocksdb::ColumnFamilyHandle* cf_handle,
- jboolean jreduce_level, jint jtarget_level,
- jint jtarget_path_id) {
rocksdb::Status s;
- rocksdb::CompactRangeOptions compact_options;
- compact_options.change_level = jreduce_level;
- compact_options.target_level = jtarget_level;
- compact_options.target_path_id = static_cast<uint32_t>(jtarget_path_id);
- if (cf_handle != nullptr) {
- s = db->CompactRange(compact_options, cf_handle, nullptr, nullptr);
+ if (jbegin_len > 0 || jend_len > 0) {
+ const rocksdb::Slice begin(str_begin);
+ const rocksdb::Slice end(str_end);
+ s = db->CompactRange(*compact_range_opts, cf_handle, &begin, &end);
} else {
- // backwards compatibility
- s = db->CompactRange(compact_options, nullptr, nullptr);
+ s = db->CompactRange(*compact_range_opts, cf_handle, nullptr, nullptr);
}
- if (s.ok()) {
- return;
+ if (jcompact_range_opts_handle == 0) {
+ delete compact_range_opts;
}
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
}
/*
* Class: org_rocksdb_RocksDB
- * Method: compactRange0
- * Signature: (JZII)V
+ * Method: setOptions
+ * Signature: (JJ[Ljava/lang/String;[Ljava/lang/String;)V
*/
-void Java_org_rocksdb_RocksDB_compactRange0__JZII(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jboolean jreduce_level,
- jint jtarget_level,
- jint jtarget_path_id) {
+void Java_org_rocksdb_RocksDB_setOptions(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
+ jobjectArray jkeys, jobjectArray jvalues) {
+ const jsize len = env->GetArrayLength(jkeys);
+ assert(len == env->GetArrayLength(jvalues));
+
+ std::unordered_map<std::string, std::string> options_map;
+ for (jsize i = 0; i < len; i++) {
+ jobject jobj_key = env->GetObjectArrayElement(jkeys, i);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ return;
+ }
+
+ jobject jobj_value = env->GetObjectArrayElement(jvalues, i);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jobj_key);
+ return;
+ }
+
+ jboolean has_exception = JNI_FALSE;
+ std::string s_key =
+ rocksdb::JniUtil::copyStdString(
+ env, reinterpret_cast<jstring>(jobj_key), &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ env->DeleteLocalRef(jobj_value);
+ env->DeleteLocalRef(jobj_key);
+ return;
+ }
+
+ std::string s_value =
+ rocksdb::JniUtil::copyStdString(
+ env, reinterpret_cast<jstring>(jobj_value), &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ env->DeleteLocalRef(jobj_value);
+ env->DeleteLocalRef(jobj_key);
+ return;
+ }
+
+ options_map[s_key] = s_value;
+
+ env->DeleteLocalRef(jobj_key);
+ env->DeleteLocalRef(jobj_value);
+ }
+
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- rocksdb_compactrange_helper(env, db, nullptr, jreduce_level, jtarget_level,
- jtarget_path_id);
+ auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+ auto s = db->SetOptions(cf_handle, options_map);
+ if (!s.ok()) {
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ }
}
/*
* Class: org_rocksdb_RocksDB
- * Method: compactRange
- * Signature: (JZIIJ)V
+ * Method: setDBOptions
+ * Signature: (J[Ljava/lang/String;[Ljava/lang/String;)V
*/
-void Java_org_rocksdb_RocksDB_compactRange__JZIIJ(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jboolean jreduce_level,
- jint jtarget_level, jint jtarget_path_id, jlong jcf_handle) {
+void Java_org_rocksdb_RocksDB_setDBOptions(
+ JNIEnv* env, jobject, jlong jdb_handle,
+ jobjectArray jkeys, jobjectArray jvalues) {
+ const jsize len = env->GetArrayLength(jkeys);
+ assert(len == env->GetArrayLength(jvalues));
+
+ std::unordered_map<std::string, std::string> options_map;
+ for (jsize i = 0; i < len; i++) {
+ jobject jobj_key = env->GetObjectArrayElement(jkeys, i);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ return;
+ }
+
+ jobject jobj_value = env->GetObjectArrayElement(jvalues, i);
+ if (env->ExceptionCheck()) {
+ // exception thrown: ArrayIndexOutOfBoundsException
+ env->DeleteLocalRef(jobj_key);
+ return;
+ }
+
+ jboolean has_exception = JNI_FALSE;
+ std::string s_key =
+ rocksdb::JniUtil::copyStdString(
+ env, reinterpret_cast<jstring>(jobj_key), &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ env->DeleteLocalRef(jobj_value);
+ env->DeleteLocalRef(jobj_key);
+ return;
+ }
+
+ std::string s_value =
+ rocksdb::JniUtil::copyStdString(
+ env, reinterpret_cast<jstring>(jobj_value), &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ env->DeleteLocalRef(jobj_value);
+ env->DeleteLocalRef(jobj_key);
+ return;
+ }
+
+ options_map[s_key] = s_value;
+
+ env->DeleteLocalRef(jobj_key);
+ env->DeleteLocalRef(jobj_value);
+ }
+
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
- rocksdb_compactrange_helper(env, db, cf_handle, jreduce_level, jtarget_level,
- jtarget_path_id);
+ auto s = db->SetDBOptions(options_map);
+ if (!s.ok()) {
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ }
}
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::CompactRange - Range
-
-/**
- * @return true if the compact range succeeded, false if a Java Exception
- * was thrown
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: compactFiles
+ * Signature: (JJJ[Ljava/lang/String;IIJ)[Ljava/lang/String;
*/
-bool rocksdb_compactrange_helper(JNIEnv* env, rocksdb::DB* db,
- rocksdb::ColumnFamilyHandle* cf_handle,
- jbyteArray jbegin, jint jbegin_len,
- jbyteArray jend, jint jend_len,
- const rocksdb::CompactRangeOptions& compact_options) {
- jbyte* begin = env->GetByteArrayElements(jbegin, nullptr);
- if (begin == nullptr) {
- // exception thrown: OutOfMemoryError
- return false;
+jobjectArray Java_org_rocksdb_RocksDB_compactFiles(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcompaction_opts_handle,
+ jlong jcf_handle, jobjectArray jinput_file_names, jint joutput_level,
+ jint joutput_path_id, jlong jcompaction_job_info_handle) {
+ jboolean has_exception = JNI_FALSE;
+ const std::vector<std::string> input_file_names =
+ rocksdb::JniUtil::copyStrings(env, jinput_file_names, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return nullptr;
}
- jbyte* end = env->GetByteArrayElements(jend, nullptr);
- if (end == nullptr) {
- // exception thrown: OutOfMemoryError
- env->ReleaseByteArrayElements(jbegin, begin, JNI_ABORT);
- return false;
+ auto* compaction_opts =
+ reinterpret_cast<rocksdb::CompactionOptions*>(jcompaction_opts_handle);
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ rocksdb::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
}
- const rocksdb::Slice begin_slice(reinterpret_cast<char*>(begin), jbegin_len);
- const rocksdb::Slice end_slice(reinterpret_cast<char*>(end), jend_len);
+ rocksdb::CompactionJobInfo* compaction_job_info = nullptr;
+ if (jcompaction_job_info_handle != 0) {
+ compaction_job_info =
+ reinterpret_cast<rocksdb::CompactionJobInfo*>(jcompaction_job_info_handle);
+ }
- rocksdb::Status s;
- if (cf_handle != nullptr) {
- s = db->CompactRange(compact_options, cf_handle, &begin_slice, &end_slice);
- } else {
- // backwards compatibility
- s = db->CompactRange(compact_options, &begin_slice, &end_slice);
+ std::vector<std::string> output_file_names;
+ auto s = db->CompactFiles(*compaction_opts, cf_handle, input_file_names,
+ static_cast<int>(joutput_level), static_cast<int>(joutput_path_id),
+ &output_file_names, compaction_job_info);
+ if (!s.ok()) {
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ return nullptr;
}
- env->ReleaseByteArrayElements(jend, end, JNI_ABORT);
- env->ReleaseByteArrayElements(jbegin, begin, JNI_ABORT);
+ return rocksdb::JniUtil::toJavaStrings(env, &output_file_names);
+}
- if (s.ok()) {
- return true;
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: pauseBackgroundWork
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_pauseBackgroundWork(
+ JNIEnv* env, jobject, jlong jdb_handle) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ auto s = db->PauseBackgroundWork();
+ if (!s.ok()) {
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
}
+}
- rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
- return false;
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: continueBackgroundWork
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_continueBackgroundWork(
+ JNIEnv* env, jobject, jlong jdb_handle) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ auto s = db->ContinueBackgroundWork();
+ if (!s.ok()) {
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+ }
}
-/**
- * @return true if the compact range succeeded, false if a Java Exception
- * was thrown
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: enableAutoCompaction
+ * Signature: (J[J)V
+ */
+void Java_org_rocksdb_RocksDB_enableAutoCompaction(
+ JNIEnv* env, jobject, jlong jdb_handle, jlongArray jcf_handles) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ jboolean has_exception = JNI_FALSE;
+ const std::vector<rocksdb::ColumnFamilyHandle*> cf_handles =
+ rocksdb::JniUtil::fromJPointers<rocksdb::ColumnFamilyHandle>(env, jcf_handles, &has_exception);
+ if (has_exception == JNI_TRUE) {
+ // exception occurred
+ return;
+ }
+ db->EnableAutoCompaction(cf_handles);
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: numberLevels
+ * Signature: (JJ)I
+ */
+jint Java_org_rocksdb_RocksDB_numberLevels(
+ JNIEnv*, jobject, jlong jdb_handle, jlong jcf_handle) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ rocksdb::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+ }
+ return static_cast<jint>(db->NumberLevels(cf_handle));
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: maxMemCompactionLevel
+ * Signature: (JJ)I
*/
-bool rocksdb_compactrange_helper(JNIEnv* env, rocksdb::DB* db,
- rocksdb::ColumnFamilyHandle* cf_handle,
- jbyteArray jbegin, jint jbegin_len,
- jbyteArray jend, jint jend_len,
- jboolean jreduce_level, jint jtarget_level,
- jint jtarget_path_id) {
- rocksdb::CompactRangeOptions compact_options;
- compact_options.change_level = jreduce_level;
- compact_options.target_level = jtarget_level;
- compact_options.target_path_id = static_cast<uint32_t>(jtarget_path_id);
+jint Java_org_rocksdb_RocksDB_maxMemCompactionLevel(
+ JNIEnv*, jobject, jlong jdb_handle, jlong jcf_handle) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ rocksdb::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+ }
+ return static_cast<jint>(db->MaxMemCompactionLevel(cf_handle));
+}
- return rocksdb_compactrange_helper(env, db, cf_handle, jbegin, jbegin_len,
- jend, jend_len, compact_options);
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: level0StopWriteTrigger
+ * Signature: (JJ)I
+ */
+jint Java_org_rocksdb_RocksDB_level0StopWriteTrigger(
+ JNIEnv*, jobject, jlong jdb_handle, jlong jcf_handle) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ rocksdb::ColumnFamilyHandle* cf_handle;
+ if (jcf_handle == 0) {
+ cf_handle = db->DefaultColumnFamily();
+ } else {
+ cf_handle =
+ reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+ }
+ return static_cast<jint>(db->Level0StopWriteTrigger(cf_handle));
}
/*
* Class: org_rocksdb_RocksDB
- * Method: compactRange0
- * Signature: (J[BI[BIZII)V
+ * Method: getName
+ * Signature: (J)Ljava/lang/String;
*/
-void Java_org_rocksdb_RocksDB_compactRange0__J_3BI_3BIZII(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jbyteArray jbegin,
- jint jbegin_len, jbyteArray jend, jint jend_len, jboolean jreduce_level,
- jint jtarget_level, jint jtarget_path_id) {
+jstring Java_org_rocksdb_RocksDB_getName(
+ JNIEnv* env, jobject, jlong jdb_handle) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- rocksdb_compactrange_helper(env, db, nullptr, jbegin, jbegin_len, jend,
- jend_len, jreduce_level, jtarget_level,
- jtarget_path_id);
+ std::string name = db->GetName();
+ return rocksdb::JniUtil::toJavaString(env, &name, false);
}
/*
* Class: org_rocksdb_RocksDB
- * Method: compactRange
- * Signature: (JJ[BI[BIZII)V
+ * Method: getEnv
+ * Signature: (J)J
*/
-void Java_org_rocksdb_RocksDB_compactRange__J_3BI_3BIZIIJ(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jbyteArray jbegin,
- jint jbegin_len, jbyteArray jend, jint jend_len, jboolean jreduce_level,
- jint jtarget_level, jint jtarget_path_id, jlong jcf_handle) {
+jlong Java_org_rocksdb_RocksDB_getEnv(
+ JNIEnv*, jobject, jlong jdb_handle) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
- rocksdb_compactrange_helper(env, db, cf_handle, jbegin, jbegin_len, jend,
- jend_len, jreduce_level, jtarget_level,
- jtarget_path_id);
+ return reinterpret_cast<jlong>(db->GetEnv());
}
-
-void Java_org_rocksdb_RocksDB_compactRange__J_3BI_3BIJJ(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jbyteArray jbegin,
- jint jbegin_len, jbyteArray jend, jint jend_len,
- jlong jcompact_options_handle, jlong jcf_handle) {
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    flush
+ * Signature: (JJ[J)V
+ */
+void Java_org_rocksdb_RocksDB_flush(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jflush_opts_handle,
+    jlongArray jcf_handles) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  auto* flush_opts =
+      reinterpret_cast<rocksdb::FlushOptions*>(jflush_opts_handle);
+  std::vector<rocksdb::ColumnFamilyHandle*> cf_handles;
+  if (jcf_handles == nullptr) {
+    // no column families supplied from Java - flush the default column family
+    cf_handles.push_back(db->DefaultColumnFamily());
+  } else {
+    jboolean has_exception = JNI_FALSE;
+    cf_handles =
+        rocksdb::JniUtil::fromJPointers<rocksdb::ColumnFamilyHandle>(
+            env, jcf_handles, &has_exception);
+    if (has_exception) {
+      // exception occurred
+      return;
+    }
+  }
+  // on failure, surface the error to Java as a RocksDBException
+  auto s = db->Flush(*flush_opts, cf_handles);
+  if (!s.ok()) {
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  }
}
-
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::PauseBackgroundWork
-
/*
* Class: org_rocksdb_RocksDB
- * Method: pauseBackgroundWork
- * Signature: (J)V
+ * Method: flushWal
+ * Signature: (JZ)V
*/
-void Java_org_rocksdb_RocksDB_pauseBackgroundWork(JNIEnv* env, jobject /*jobj*/,
- jlong jdb_handle) {
+void Java_org_rocksdb_RocksDB_flushWal(
+ JNIEnv* env, jobject, jlong jdb_handle, jboolean jsync) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto s = db->PauseBackgroundWork();
+ auto s = db->FlushWAL(jsync == JNI_TRUE);
if (!s.ok()) {
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
}
}
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::ContinueBackgroundWork
-
/*
* Class: org_rocksdb_RocksDB
- * Method: continueBackgroundWork
+ * Method: syncWal
* Signature: (J)V
*/
-void Java_org_rocksdb_RocksDB_continueBackgroundWork(JNIEnv* env,
- jobject /*jobj*/,
- jlong jdb_handle) {
+void Java_org_rocksdb_RocksDB_syncWal(
+ JNIEnv* env, jobject, jlong jdb_handle) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto s = db->ContinueBackgroundWork();
+ auto s = db->SyncWAL();
if (!s.ok()) {
rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
}
}
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::GetLatestSequenceNumber
-
/*
* Class: org_rocksdb_RocksDB
* Method: getLatestSequenceNumber
* Signature: (J)V
*/
-jlong Java_org_rocksdb_RocksDB_getLatestSequenceNumber(JNIEnv* /*env*/,
- jobject /*jdb*/,
- jlong jdb_handle) {
+jlong Java_org_rocksdb_RocksDB_getLatestSequenceNumber(
+ JNIEnv*, jobject, jlong jdb_handle) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
return db->GetLatestSequenceNumber();
}
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB enable/disable file deletions
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    setPreserveDeletesSequenceNumber
+ * Signature: (JJ)Z
+ */
+jboolean JNICALL Java_org_rocksdb_RocksDB_setPreserveDeletesSequenceNumber(
+    JNIEnv*, jobject, jlong jdb_handle, jlong jseq_number) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  // JNI_TRUE iff the DB accepted jseq_number as the new
+  // preserve-deletes sequence number
+  if (db->SetPreserveDeletesSequenceNumber(
+      static_cast<uint64_t>(jseq_number))) {
+    return JNI_TRUE;
+  } else {
+    return JNI_FALSE;
+  }
+}
/*
* Class: org_rocksdb_RocksDB
- * Method: enableFileDeletions
+ * Method: disableFileDeletions
* Signature: (J)V
*/
-void Java_org_rocksdb_RocksDB_disableFileDeletions(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle) {
+void Java_org_rocksdb_RocksDB_disableFileDeletions(
+ JNIEnv* env, jobject, jlong jdb_handle) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
rocksdb::Status s = db->DisableFileDeletions();
if (!s.ok()) {
* Method: enableFileDeletions
* Signature: (JZ)V
*/
-void Java_org_rocksdb_RocksDB_enableFileDeletions(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jboolean jforce) {
+void Java_org_rocksdb_RocksDB_enableFileDeletions(
+ JNIEnv* env, jobject, jlong jdb_handle, jboolean jforce) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
rocksdb::Status s = db->EnableFileDeletions(jforce);
if (!s.ok()) {
}
}
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::GetUpdatesSince
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    getLiveFiles
+ * Signature: (JZ)[Ljava/lang/String;
+ */
+jobjectArray Java_org_rocksdb_RocksDB_getLiveFiles(
+    JNIEnv* env, jobject, jlong jdb_handle, jboolean jflush_memtable) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  std::vector<std::string> live_files;
+  uint64_t manifest_file_size = 0;
+  // jflush_memtable is forwarded as DB::GetLiveFiles' flush_memtable flag
+  auto s = db->GetLiveFiles(
+      live_files, &manifest_file_size, jflush_memtable == JNI_TRUE);
+  if (!s.ok()) {
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+    return nullptr;
+  }
+
+  // append the manifest_file_size to the vector
+  // for passing back to java
+  // (so the Java caller receives it as the LAST element of the array)
+  live_files.push_back(std::to_string(manifest_file_size));
+
+  return rocksdb::JniUtil::toJavaStrings(env, &live_files);
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    getSortedWalFiles
+ * Signature: (J)[Lorg/rocksdb/LogFile;
+ */
+jobjectArray Java_org_rocksdb_RocksDB_getSortedWalFiles(
+    JNIEnv* env, jobject, jlong jdb_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  std::vector<std::unique_ptr<rocksdb::LogFile>> sorted_wal_files;
+  auto s = db->GetSortedWalFiles(sorted_wal_files);
+  if (!s.ok()) {
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+    return nullptr;
+  }
+
+  // convert to Java type
+  const jsize jlen = static_cast<jsize>(sorted_wal_files.size());
+  jobjectArray jsorted_wal_files = env->NewObjectArray(
+      jlen, rocksdb::LogFileJni::getJClass(env), nullptr);
+  if(jsorted_wal_files == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return nullptr;
+  }
+
+  jsize i = 0;
+  for (auto it = sorted_wal_files.begin(); it != sorted_wal_files.end(); ++it) {
+    // copy each C++ LogFile into a Java org.rocksdb.LogFile
+    jobject jlog_file = rocksdb::LogFileJni::fromCppLogFile(env, it->get());
+    if (jlog_file == nullptr) {
+      // exception occurred
+      env->DeleteLocalRef(jsorted_wal_files);
+      return nullptr;
+    }
+
+    env->SetObjectArrayElement(jsorted_wal_files, i++, jlog_file);
+    if (env->ExceptionCheck()) {
+      // exception occurred
+      env->DeleteLocalRef(jlog_file);
+      env->DeleteLocalRef(jsorted_wal_files);
+      return nullptr;
+    }
+
+    // release the local ref eagerly so long WAL lists do not
+    // exhaust the JNI local reference table
+    env->DeleteLocalRef(jlog_file);
+  }
+
+  return jsorted_wal_files;
+}
/*
* Class: org_rocksdb_RocksDB
* Method: getUpdatesSince
* Signature: (JJ)J
*/
-jlong Java_org_rocksdb_RocksDB_getUpdatesSince(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle,
- jlong jsequence_number) {
+jlong Java_org_rocksdb_RocksDB_getUpdatesSince(
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jsequence_number) {
auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
rocksdb::SequenceNumber sequence_number =
static_cast<rocksdb::SequenceNumber>(jsequence_number);
/*
* Class: org_rocksdb_RocksDB
- * Method: setOptions
- * Signature: (JJ[Ljava/lang/String;[Ljava/lang/String;)V
+ * Method: deleteFile
+ * Signature: (JLjava/lang/String;)V
*/
-void Java_org_rocksdb_RocksDB_setOptions(JNIEnv* env, jobject /*jdb*/,
- jlong jdb_handle, jlong jcf_handle,
- jobjectArray jkeys,
- jobjectArray jvalues) {
- const jsize len = env->GetArrayLength(jkeys);
- assert(len == env->GetArrayLength(jvalues));
-
- std::unordered_map<std::string, std::string> options_map;
- for (jsize i = 0; i < len; i++) {
- jobject jobj_key = env->GetObjectArrayElement(jkeys, i);
- if (env->ExceptionCheck()) {
- // exception thrown: ArrayIndexOutOfBoundsException
- return;
- }
-
- jobject jobj_value = env->GetObjectArrayElement(jvalues, i);
- if (env->ExceptionCheck()) {
- // exception thrown: ArrayIndexOutOfBoundsException
- env->DeleteLocalRef(jobj_key);
- return;
- }
+void Java_org_rocksdb_RocksDB_deleteFile(
+    JNIEnv* env, jobject, jlong jdb_handle, jstring jname) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  jboolean has_exception = JNI_FALSE;
+  std::string name =
+      rocksdb::JniUtil::copyStdString(env, jname, &has_exception);
+  if (has_exception == JNI_TRUE) {
+    // exception occurred
+    return;
+  }
+  // propagate failures to Java, consistent with the other DB methods
+  // (previously the returned Status was silently discarded)
+  auto s = db->DeleteFile(name);
+  if (!s.ok()) {
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
- jstring jkey = reinterpret_cast<jstring>(jobj_key);
- jstring jval = reinterpret_cast<jstring>(jobj_value);
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getLiveFilesMetaData
+ * Signature: (J)[Lorg/rocksdb/LiveFileMetaData;
+ */
+jobjectArray Java_org_rocksdb_RocksDB_getLiveFilesMetaData(
+ JNIEnv* env, jobject, jlong jdb_handle) {
+ auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+ std::vector<rocksdb::LiveFileMetaData> live_files_meta_data;
+ db->GetLiveFilesMetaData(&live_files_meta_data);
+
+ // convert to Java type
+ const jsize jlen = static_cast<jsize>(live_files_meta_data.size());
+ jobjectArray jlive_files_meta_data = env->NewObjectArray(
+ jlen, rocksdb::LiveFileMetaDataJni::getJClass(env), nullptr);
+ if(jlive_files_meta_data == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
- const char* key = env->GetStringUTFChars(jkey, nullptr);
- if (key == nullptr) {
- // exception thrown: OutOfMemoryError
- env->DeleteLocalRef(jobj_value);
- env->DeleteLocalRef(jobj_key);
- return;
+ jsize i = 0;
+ for (auto it = live_files_meta_data.begin(); it != live_files_meta_data.end(); ++it) {
+ jobject jlive_file_meta_data =
+ rocksdb::LiveFileMetaDataJni::fromCppLiveFileMetaData(env, &(*it));
+ if (jlive_file_meta_data == nullptr) {
+ // exception occurred
+ env->DeleteLocalRef(jlive_files_meta_data);
+ return nullptr;
}
- const char* value = env->GetStringUTFChars(jval, nullptr);
- if (value == nullptr) {
- // exception thrown: OutOfMemoryError
- env->ReleaseStringUTFChars(jkey, key);
- env->DeleteLocalRef(jobj_value);
- env->DeleteLocalRef(jobj_key);
- return;
+ env->SetObjectArrayElement(jlive_files_meta_data, i++, jlive_file_meta_data);
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ env->DeleteLocalRef(jlive_file_meta_data);
+ env->DeleteLocalRef(jlive_files_meta_data);
+ return nullptr;
}
- std::string s_key(key);
- std::string s_value(value);
- options_map[s_key] = s_value;
-
- env->ReleaseStringUTFChars(jkey, key);
- env->ReleaseStringUTFChars(jval, value);
- env->DeleteLocalRef(jobj_key);
- env->DeleteLocalRef(jobj_value);
+ env->DeleteLocalRef(jlive_file_meta_data);
}
- auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
- auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
- db->SetOptions(cf_handle, options_map);
+ return jlive_files_meta_data;
}
-//////////////////////////////////////////////////////////////////////////////
-// rocksdb::DB::IngestExternalFile
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    getColumnFamilyMetaData
+ * Signature: (JJ)Lorg/rocksdb/ColumnFamilyMetaData;
+ */
+jobject Java_org_rocksdb_RocksDB_getColumnFamilyMetaData(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  rocksdb::ColumnFamilyHandle* cf_handle;
+  // a zero handle selects the default column family
+  if (jcf_handle == 0) {
+    cf_handle = db->DefaultColumnFamily();
+  } else {
+    cf_handle =
+        reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+  }
+  rocksdb::ColumnFamilyMetaData cf_metadata;
+  db->GetColumnFamilyMetaData(cf_handle, &cf_metadata);
+  return rocksdb::ColumnFamilyMetaDataJni::fromCppColumnFamilyMetaData(
+      env, &cf_metadata);
+}
/*
* Class: org_rocksdb_RocksDB
* Signature: (JJ[Ljava/lang/String;IJ)V
*/
void Java_org_rocksdb_RocksDB_ingestExternalFile(
- JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jcf_handle,
+ JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
jobjectArray jfile_path_list, jint jfile_path_list_len,
jlong jingest_external_file_options_handle) {
jboolean has_exception = JNI_FALSE;
}
}
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    verifyChecksum
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_verifyChecksum(
+    JNIEnv* env, jobject, jlong jdb_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  // throws RocksDBException to Java if verification reports an error
+  auto s = db->VerifyChecksum();
+  if (!s.ok()) {
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    getDefaultColumnFamily
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_RocksDB_getDefaultColumnFamily(
+    JNIEnv*, jobject, jlong jdb_handle) {
+  auto* db_handle = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  // NOTE(review): the handle comes from DB::DefaultColumnFamily() -
+  // presumably owned by the DB, so Java must not dispose it; confirm
+  auto* cf_handle = db_handle->DefaultColumnFamily();
+  return reinterpret_cast<jlong>(cf_handle);
+}
+
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    getPropertiesOfAllTables
+ * Signature: (JJ)Ljava/util/Map;
+ */
+jobject Java_org_rocksdb_RocksDB_getPropertiesOfAllTables(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  rocksdb::ColumnFamilyHandle* cf_handle;
+  // a zero handle selects the default column family
+  if (jcf_handle == 0) {
+    cf_handle = db->DefaultColumnFamily();
+  } else {
+    cf_handle =
+        reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+  }
+  rocksdb::TablePropertiesCollection table_properties_collection;
+  auto s = db->GetPropertiesOfAllTables(cf_handle,
+      &table_properties_collection);
+  if (!s.ok()) {
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+    // must not call further JNI functions with a pending exception
+    return nullptr;
+  }
+
+  // convert to Java type (HashMap<String, TableProperties>)
+  jobject jhash_map = rocksdb::HashMapJni::construct(
+      env, static_cast<uint32_t>(table_properties_collection.size()));
+  if (jhash_map == nullptr) {
+    // exception occurred
+    return nullptr;
+  }
+
+  const rocksdb::HashMapJni::FnMapKV<const std::string, const std::shared_ptr<const rocksdb::TableProperties>, jobject, jobject> fn_map_kv =
+      [env](const std::pair<const std::string, const std::shared_ptr<const rocksdb::TableProperties>>& kv) {
+    jstring jkey = rocksdb::JniUtil::toJavaString(env, &(kv.first), false);
+    if (env->ExceptionCheck()) {
+      // an error occurred
+      return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+    }
+
+    jobject jtable_properties = rocksdb::TablePropertiesJni::fromCppTableProperties(env, *(kv.second.get()));
+    if (jtable_properties == nullptr) {
+      // an error occurred
+      env->DeleteLocalRef(jkey);
+      return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+    }
+
+    return std::unique_ptr<std::pair<jobject, jobject>>(new std::pair<jobject, jobject>(static_cast<jobject>(jkey), static_cast<jobject>(jtable_properties)));
+  };
+
+  if (!rocksdb::HashMapJni::putAll(env, jhash_map, table_properties_collection.begin(), table_properties_collection.end(), fn_map_kv)) {
+    // exception occurred
+    return nullptr;
+  }
+
+  return jhash_map;
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    getPropertiesOfTablesInRange
+ * Signature: (JJ[J)Ljava/util/Map;
+ */
+jobject Java_org_rocksdb_RocksDB_getPropertiesOfTablesInRange(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle,
+    jlongArray jrange_slice_handles) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  rocksdb::ColumnFamilyHandle* cf_handle;
+  // a zero handle selects the default column family
+  if (jcf_handle == 0) {
+    cf_handle = db->DefaultColumnFamily();
+  } else {
+    cf_handle =
+        reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+  }
+  const jsize jlen = env->GetArrayLength(jrange_slice_handles);
+  jboolean jrange_slice_handles_is_copy = JNI_FALSE;
+  jlong *jrange_slice_handle = env->GetLongArrayElements(
+      jrange_slice_handles, &jrange_slice_handles_is_copy);
+  if (jrange_slice_handle == nullptr) {
+    // exception occurred
+    return nullptr;
+  }
+
+  // the handles array holds consecutive [start, limit] Slice pointer pairs
+  const size_t ranges_len = static_cast<size_t>(jlen / 2);
+  auto ranges = std::unique_ptr<rocksdb::Range[]>(new rocksdb::Range[ranges_len]);
+  for (jsize i = 0, j = 0; i < jlen; ++i) {
+    auto* start = reinterpret_cast<rocksdb::Slice*>(
+        jrange_slice_handle[i]);
+    auto* limit = reinterpret_cast<rocksdb::Slice*>(
+        jrange_slice_handle[++i]);
+    ranges[j++] = rocksdb::Range(*start, *limit);
+  }
+
+  rocksdb::TablePropertiesCollection table_properties_collection;
+  auto s = db->GetPropertiesOfTablesInRange(
+      cf_handle, ranges.get(), ranges_len, &table_properties_collection);
+  if (!s.ok()) {
+    // error occurred
+    env->ReleaseLongArrayElements(jrange_slice_handles, jrange_slice_handle, JNI_ABORT);
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+    return nullptr;
+  }
+
+  // cleanup
+  env->ReleaseLongArrayElements(jrange_slice_handles, jrange_slice_handle, JNI_ABORT);
+
+  // convert to Java type: the native signature promises Ljava/util/Map;
+  // (previously the input long[] was returned by mistake), so build a
+  // HashMap<String, TableProperties> just like getPropertiesOfAllTables
+  jobject jhash_map = rocksdb::HashMapJni::construct(
+      env, static_cast<uint32_t>(table_properties_collection.size()));
+  if (jhash_map == nullptr) {
+    // exception occurred
+    return nullptr;
+  }
+
+  const rocksdb::HashMapJni::FnMapKV<const std::string, const std::shared_ptr<const rocksdb::TableProperties>, jobject, jobject> fn_map_kv =
+      [env](const std::pair<const std::string, const std::shared_ptr<const rocksdb::TableProperties>>& kv) {
+    jstring jkey = rocksdb::JniUtil::toJavaString(env, &(kv.first), false);
+    if (env->ExceptionCheck()) {
+      // an error occurred
+      return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+    }
+
+    jobject jtable_properties = rocksdb::TablePropertiesJni::fromCppTableProperties(env, *(kv.second.get()));
+    if (jtable_properties == nullptr) {
+      // an error occurred
+      env->DeleteLocalRef(jkey);
+      return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+    }
+
+    return std::unique_ptr<std::pair<jobject, jobject>>(new std::pair<jobject, jobject>(static_cast<jobject>(jkey), static_cast<jobject>(jtable_properties)));
+  };
+
+  if (!rocksdb::HashMapJni::putAll(env, jhash_map, table_properties_collection.begin(), table_properties_collection.end(), fn_map_kv)) {
+    // exception occurred
+    return nullptr;
+  }
+
+  return jhash_map;
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    suggestCompactRange
+ * Signature: (JJ)[J
+ */
+jlongArray Java_org_rocksdb_RocksDB_suggestCompactRange(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  rocksdb::ColumnFamilyHandle* cf_handle;
+  // a zero handle selects the default column family
+  if (jcf_handle == 0) {
+    cf_handle = db->DefaultColumnFamily();
+  } else {
+    cf_handle =
+        reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+  }
+  // heap-allocated: on success, ownership transfers to Java via the
+  // returned handle pair (presumably Java disposes them - confirm)
+  auto* begin = new rocksdb::Slice();
+  auto* end = new rocksdb::Slice();
+  auto s = db->SuggestCompactRange(cf_handle, begin, end);
+  if (!s.ok()) {
+    // error occurred
+    delete begin;
+    delete end;
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+    return nullptr;
+  }
+
+  jlongArray jslice_handles = env->NewLongArray(2);
+  if (jslice_handles == nullptr) {
+    // exception thrown: OutOfMemoryError
+    delete begin;
+    delete end;
+    return nullptr;
+  }
+
+  // pack the two native pointers as [begin, end] for the Java side
+  jlong slice_handles[2];
+  slice_handles[0] = reinterpret_cast<jlong>(begin);
+  slice_handles[1] = reinterpret_cast<jlong>(end);
+  env->SetLongArrayRegion(jslice_handles, 0, 2, slice_handles);
+  if (env->ExceptionCheck()) {
+    // exception thrown: ArrayIndexOutOfBoundsException
+    delete begin;
+    delete end;
+    env->DeleteLocalRef(jslice_handles);
+    return nullptr;
+  }
+
+  return jslice_handles;
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    promoteL0
+ * Signature: (JJI)V
+ */
+void Java_org_rocksdb_RocksDB_promoteL0(
+    JNIEnv*, jobject, jlong jdb_handle, jlong jcf_handle, jint jtarget_level) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  rocksdb::ColumnFamilyHandle* cf_handle;
+  // a zero handle selects the default column family
+  if (jcf_handle == 0) {
+    cf_handle = db->DefaultColumnFamily();
+  } else {
+    cf_handle =
+        reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
+  }
+  // NOTE(review): the returned Status is discarded, so failures are
+  // invisible to the Java caller - consider throwing RocksDBException
+  db->PromoteL0(cf_handle, static_cast<int>(jtarget_level));
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    startTrace
+ * Signature: (JJJ)V
+ */
+void Java_org_rocksdb_RocksDB_startTrace(
+    JNIEnv* env, jobject, jlong jdb_handle, jlong jmax_trace_file_size,
+    jlong jtrace_writer_jnicallback_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  rocksdb::TraceOptions trace_options;
+  trace_options.max_trace_file_size =
+      static_cast<uint64_t>(jmax_trace_file_size);
+  // transfer ownership of trace writer from Java to C++
+  // (Java must therefore not free the callback handle afterwards)
+  auto trace_writer = std::unique_ptr<rocksdb::TraceWriterJniCallback>(
+      reinterpret_cast<rocksdb::TraceWriterJniCallback*>(
+          jtrace_writer_jnicallback_handle));
+  auto s = db->StartTrace(trace_options, std::move(trace_writer));
+  if (!s.ok()) {
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    endTrace
+ * Signature: (J)V
+ */
+JNIEXPORT void JNICALL Java_org_rocksdb_RocksDB_endTrace(
+    JNIEnv* env, jobject, jlong jdb_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  // ends the trace begun by startTrace; throws to Java on failure
+  auto s = db->EndTrace();
+  if (!s.ok()) {
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
/*
* Class: org_rocksdb_RocksDB
* Method: destroyDB
* Signature: (Ljava/lang/String;J)V
*/
-void Java_org_rocksdb_RocksDB_destroyDB(JNIEnv* env, jclass /*jcls*/,
- jstring jdb_path,
- jlong joptions_handle) {
+void Java_org_rocksdb_RocksDB_destroyDB(
+ JNIEnv* env, jclass, jstring jdb_path, jlong joptions_handle) {
const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
if (db_path == nullptr) {
// exception thrown: OutOfMemoryError
reinterpret_cast<std::shared_ptr<rocksdb::SstFileManager>*>(jhandle);
auto tracked_files = sptr_sst_file_manager->get()->GetTrackedFiles();
+ //TODO(AR) could refactor to share code with rocksdb::HashMapJni::fromCppMap(env, tracked_files);
+
const jobject jtracked_files = rocksdb::HashMapJni::construct(
env, static_cast<uint32_t>(tracked_files.size()));
if (jtracked_files == nullptr) {
return nullptr;
}
- const rocksdb::HashMapJni::FnMapKV<const std::string, const uint64_t>
+ const rocksdb::HashMapJni::FnMapKV<const std::string, const uint64_t, jobject, jobject>
fn_map_kv =
[env](const std::pair<const std::string, const uint64_t>& pair) {
const jstring jtracked_file_path =
* Method: newStatistics
* Signature: ()J
*/
-jlong Java_org_rocksdb_Statistics_newStatistics__(JNIEnv* env, jclass jcls) {
- return Java_org_rocksdb_Statistics_newStatistics___3BJ(env, jcls, nullptr, 0);
+jlong Java_org_rocksdb_Statistics_newStatistics__(
+ JNIEnv* env, jclass jcls) {
+ return Java_org_rocksdb_Statistics_newStatistics___3BJ(
+ env, jcls, nullptr, 0);
}
/*
* Method: newStatistics
* Signature: ([B)J
*/
-jlong Java_org_rocksdb_Statistics_newStatistics___3B(JNIEnv* env, jclass jcls,
- jbyteArray jhistograms) {
- return Java_org_rocksdb_Statistics_newStatistics___3BJ(env, jcls, jhistograms,
- 0);
+jlong Java_org_rocksdb_Statistics_newStatistics___3B(
+ JNIEnv* env, jclass jcls, jbyteArray jhistograms) {
+ return Java_org_rocksdb_Statistics_newStatistics___3BJ(
+ env, jcls, jhistograms, 0);
}
/*
* Signature: ([BJ)J
*/
jlong Java_org_rocksdb_Statistics_newStatistics___3BJ(
- JNIEnv* env, jclass /*jcls*/, jbyteArray jhistograms,
- jlong jother_statistics_handle) {
+ JNIEnv* env, jclass, jbyteArray jhistograms, jlong jother_statistics_handle) {
std::shared_ptr<rocksdb::Statistics>* pSptr_other_statistics = nullptr;
if (jother_statistics_handle > 0) {
pSptr_other_statistics =
* Method: disposeInternal
* Signature: (J)V
*/
-void Java_org_rocksdb_Statistics_disposeInternal(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
+void Java_org_rocksdb_Statistics_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
if (jhandle > 0) {
auto* pSptr_statistics =
reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
* Method: statsLevel
* Signature: (J)B
*/
-jbyte Java_org_rocksdb_Statistics_statsLevel(JNIEnv* /*env*/, jobject /*jobj*/,
- jlong jhandle) {
+jbyte Java_org_rocksdb_Statistics_statsLevel(
+ JNIEnv*, jobject, jlong jhandle) {
auto* pSptr_statistics =
reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
assert(pSptr_statistics != nullptr);
* Method: setStatsLevel
* Signature: (JB)V
*/
-void Java_org_rocksdb_Statistics_setStatsLevel(JNIEnv* /*env*/,
- jobject /*jobj*/, jlong jhandle,
- jbyte jstats_level) {
+void Java_org_rocksdb_Statistics_setStatsLevel(
+ JNIEnv*, jobject, jlong jhandle, jbyte jstats_level) {
auto* pSptr_statistics =
reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
assert(pSptr_statistics != nullptr);
* Method: getTickerCount
* Signature: (JB)J
*/
-jlong Java_org_rocksdb_Statistics_getTickerCount(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jbyte jticker_type) {
+jlong Java_org_rocksdb_Statistics_getTickerCount(
+ JNIEnv*, jobject, jlong jhandle, jbyte jticker_type) {
auto* pSptr_statistics =
reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
assert(pSptr_statistics != nullptr);
auto ticker = rocksdb::TickerTypeJni::toCppTickers(jticker_type);
- return pSptr_statistics->get()->getTickerCount(ticker);
+ uint64_t count = pSptr_statistics->get()->getTickerCount(ticker);
+ return static_cast<jlong>(count);
}
/*
* Method: getAndResetTickerCount
* Signature: (JB)J
*/
-jlong Java_org_rocksdb_Statistics_getAndResetTickerCount(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle,
- jbyte jticker_type) {
+jlong Java_org_rocksdb_Statistics_getAndResetTickerCount(
+ JNIEnv*, jobject, jlong jhandle, jbyte jticker_type) {
auto* pSptr_statistics =
reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
assert(pSptr_statistics != nullptr);
* Method: getHistogramData
* Signature: (JB)Lorg/rocksdb/HistogramData;
*/
-jobject Java_org_rocksdb_Statistics_getHistogramData(JNIEnv* env,
- jobject /*jobj*/,
- jlong jhandle,
- jbyte jhistogram_type) {
+jobject Java_org_rocksdb_Statistics_getHistogramData(
+ JNIEnv* env, jobject, jlong jhandle, jbyte jhistogram_type) {
auto* pSptr_statistics =
reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
assert(pSptr_statistics != nullptr);
- rocksdb::HistogramData
- data; // TODO(AR) perhaps better to construct a Java Object Wrapper that
- // uses ptr to C++ `new HistogramData`
+ // TODO(AR) perhaps better to construct a Java Object Wrapper that
+ // uses ptr to C++ `new HistogramData`
+ rocksdb::HistogramData data;
+
auto histogram = rocksdb::HistogramTypeJni::toCppHistograms(jhistogram_type);
pSptr_statistics->get()->histogramData(
static_cast<rocksdb::Histograms>(histogram), &data);
* Method: getHistogramString
* Signature: (JB)Ljava/lang/String;
*/
-jstring Java_org_rocksdb_Statistics_getHistogramString(JNIEnv* env,
- jobject /*jobj*/,
- jlong jhandle,
- jbyte jhistogram_type) {
+jstring Java_org_rocksdb_Statistics_getHistogramString(
+ JNIEnv* env, jobject, jlong jhandle, jbyte jhistogram_type) {
auto* pSptr_statistics =
reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
assert(pSptr_statistics != nullptr);
* Method: reset
* Signature: (J)V
*/
-void Java_org_rocksdb_Statistics_reset(JNIEnv* env, jobject /*jobj*/,
- jlong jhandle) {
+void Java_org_rocksdb_Statistics_reset(
+ JNIEnv* env, jobject, jlong jhandle) {
auto* pSptr_statistics =
reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
assert(pSptr_statistics != nullptr);
* Method: toString
* Signature: (J)Ljava/lang/String;
*/
-jstring Java_org_rocksdb_Statistics_toString(JNIEnv* env, jobject /*jobj*/,
- jlong jhandle) {
+jstring Java_org_rocksdb_Statistics_toString(
+ JNIEnv* env, jobject, jlong jhandle) {
auto* pSptr_statistics =
reinterpret_cast<std::shared_ptr<rocksdb::Statistics>*>(jhandle);
assert(pSptr_statistics != nullptr);
#include <jni.h>
#include "include/org_rocksdb_BlockBasedTableConfig.h"
#include "include/org_rocksdb_PlainTableConfig.h"
+#include "portal.h"
#include "rocksdb/cache.h"
#include "rocksdb/filter_policy.h"
/*
* Class: org_rocksdb_BlockBasedTableConfig
* Method: newTableFactoryHandle
- * Signature: (ZJIJJIIZJZZZZJZZJIBBI)J
+ * Signature: (ZZZZBBDBZJJJJIIIJZZJZZIIZZJIJI)J
*/
jlong Java_org_rocksdb_BlockBasedTableConfig_newTableFactoryHandle(
- JNIEnv * /*env*/, jobject /*jobj*/, jboolean no_block_cache,
- jlong block_cache_size, jint block_cache_num_shardbits, jlong jblock_cache,
- jlong block_size, jint block_size_deviation, jint block_restart_interval,
- jboolean whole_key_filtering, jlong jfilter_policy,
- jboolean cache_index_and_filter_blocks,
- jboolean cache_index_and_filter_blocks_with_high_priority,
- jboolean pin_l0_filter_and_index_blocks_in_cache,
- jboolean partition_filters, jlong metadata_block_size,
- jboolean pin_top_level_index_and_filter,
- jboolean hash_index_allow_collision, jlong block_cache_compressed_size,
- jint block_cache_compressd_num_shard_bits, jbyte jchecksum_type,
- jbyte jindex_type, jint jformat_version) {
+ JNIEnv*, jobject, jboolean jcache_index_and_filter_blocks,
+ jboolean jcache_index_and_filter_blocks_with_high_priority,
+ jboolean jpin_l0_filter_and_index_blocks_in_cache,
+ jboolean jpin_top_level_index_and_filter, jbyte jindex_type_value,
+ jbyte jdata_block_index_type_value,
+ jdouble jdata_block_hash_table_util_ratio, jbyte jchecksum_type_value,
+ jboolean jno_block_cache, jlong jblock_cache_handle,
+ jlong jpersistent_cache_handle,
+ jlong jblock_cache_compressed_handle, jlong jblock_size,
+ jint jblock_size_deviation, jint jblock_restart_interval,
+ jint jindex_block_restart_interval, jlong jmetadata_block_size,
+ jboolean jpartition_filters, jboolean juse_delta_encoding,
+ jlong jfilter_policy_handle, jboolean jwhole_key_filtering,
+ jboolean jverify_compression, jint jread_amp_bytes_per_bit,
+ jint jformat_version, jboolean jenable_index_compression,
+ jboolean jblock_align, jlong jblock_cache_size,
+ jint jblock_cache_num_shard_bits, jlong jblock_cache_compressed_size,
+ jint jblock_cache_compressed_num_shard_bits) {
  rocksdb::BlockBasedTableOptions options;
- options.no_block_cache = no_block_cache;
-
- if (!no_block_cache) {
- if (jblock_cache > 0) {
+ // Index/filter caching and pinning behaviour, plus the enum-valued fields
+ // which are translated from their Java byte values via the portal.h helpers.
+ options.cache_index_and_filter_blocks =
+ static_cast<bool>(jcache_index_and_filter_blocks);
+ options.cache_index_and_filter_blocks_with_high_priority =
+ static_cast<bool>(jcache_index_and_filter_blocks_with_high_priority);
+ options.pin_l0_filter_and_index_blocks_in_cache =
+ static_cast<bool>(jpin_l0_filter_and_index_blocks_in_cache);
+ options.pin_top_level_index_and_filter =
+ static_cast<bool>(jpin_top_level_index_and_filter);
+ options.index_type =
+ rocksdb::IndexTypeJni::toCppIndexType(jindex_type_value);
+ options.data_block_index_type =
+ rocksdb::DataBlockIndexTypeJni::toCppDataBlockIndexType(
+ jdata_block_index_type_value);
+ options.data_block_hash_table_util_ratio =
+ static_cast<double>(jdata_block_hash_table_util_ratio);
+ options.checksum =
+ rocksdb::ChecksumTypeJni::toCppChecksumType(jchecksum_type_value);
+ // Block cache selection: an explicit cache handle wins over a size-based
+ // LRU cache; a zero/negative size leaves the library default untouched.
+ options.no_block_cache = static_cast<bool>(jno_block_cache);
+ if (options.no_block_cache) {
+ options.block_cache = nullptr;
+ } else {
+ if (jblock_cache_handle > 0) {
  std::shared_ptr<rocksdb::Cache> *pCache =
- reinterpret_cast<std::shared_ptr<rocksdb::Cache> *>(jblock_cache);
+ reinterpret_cast<std::shared_ptr<rocksdb::Cache> *>(jblock_cache_handle);
  options.block_cache = *pCache;
- } else if (block_cache_size > 0) {
- if (block_cache_num_shardbits > 0) {
- options.block_cache =
- rocksdb::NewLRUCache(block_cache_size, block_cache_num_shardbits);
+ } else if (jblock_cache_size > 0) {
+ if (jblock_cache_num_shard_bits > 0) {
+ options.block_cache = rocksdb::NewLRUCache(
+ static_cast<size_t>(jblock_cache_size),
+ static_cast<int>(jblock_cache_num_shard_bits));
  } else {
- options.block_cache = rocksdb::NewLRUCache(block_cache_size);
+ options.block_cache = rocksdb::NewLRUCache(
+ static_cast<size_t>(jblock_cache_size));
  }
  }
  }
- options.block_size = block_size;
- options.block_size_deviation = block_size_deviation;
- options.block_restart_interval = block_restart_interval;
- options.whole_key_filtering = whole_key_filtering;
- if (jfilter_policy > 0) {
- std::shared_ptr<rocksdb::FilterPolicy> *pFilterPolicy =
- reinterpret_cast<std::shared_ptr<rocksdb::FilterPolicy> *>(
- jfilter_policy);
- options.filter_policy = *pFilterPolicy;
+ // Optional persistent cache, shared from the Java-side native handle.
+ if (jpersistent_cache_handle > 0) {
+ std::shared_ptr<rocksdb::PersistentCache> *pCache =
+ reinterpret_cast<std::shared_ptr<rocksdb::PersistentCache> *>(jpersistent_cache_handle);
+ options.persistent_cache = *pCache;
  }
- options.cache_index_and_filter_blocks = cache_index_and_filter_blocks;
- options.cache_index_and_filter_blocks_with_high_priority =
- cache_index_and_filter_blocks_with_high_priority;
- options.pin_l0_filter_and_index_blocks_in_cache =
- pin_l0_filter_and_index_blocks_in_cache;
- options.partition_filters = partition_filters;
- options.metadata_block_size = metadata_block_size;
- options.pin_top_level_index_and_filter = pin_top_level_index_and_filter;
- options.hash_index_allow_collision = hash_index_allow_collision;
- if (block_cache_compressed_size > 0) {
- if (block_cache_compressd_num_shard_bits > 0) {
- options.block_cache = rocksdb::NewLRUCache(
- block_cache_compressed_size, block_cache_compressd_num_shard_bits);
+ // Compressed block cache: same handle-over-size precedence as above. Note
+ // the old code wrongly assigned this to options.block_cache; it now goes to
+ // options.block_cache_compressed.
+ if (jblock_cache_compressed_handle > 0) {
+ std::shared_ptr<rocksdb::Cache> *pCache =
+ reinterpret_cast<std::shared_ptr<rocksdb::Cache> *>(jblock_cache_compressed_handle);
+ options.block_cache_compressed = *pCache;
+ } else if (jblock_cache_compressed_size > 0) {
+ if (jblock_cache_compressed_num_shard_bits > 0) {
+ options.block_cache_compressed = rocksdb::NewLRUCache(
+ static_cast<size_t>(jblock_cache_compressed_size),
+ static_cast<int>(jblock_cache_compressed_num_shard_bits));
  } else {
- options.block_cache = rocksdb::NewLRUCache(block_cache_compressed_size);
+ options.block_cache_compressed = rocksdb::NewLRUCache(
+ static_cast<size_t>(jblock_cache_compressed_size));
  }
  }
- options.checksum = static_cast<rocksdb::ChecksumType>(jchecksum_type);
- options.index_type =
- static_cast<rocksdb::BlockBasedTableOptions::IndexType>(jindex_type);
- options.format_version = jformat_version;
+ // Block layout, filter policy and remaining scalar options.
+ options.block_size = static_cast<size_t>(jblock_size);
+ options.block_size_deviation = static_cast<int>(jblock_size_deviation);
+ options.block_restart_interval = static_cast<int>(jblock_restart_interval);
+ options.index_block_restart_interval = static_cast<int>(jindex_block_restart_interval);
+ options.metadata_block_size = static_cast<uint64_t>(jmetadata_block_size);
+ options.partition_filters = static_cast<bool>(jpartition_filters);
+ options.use_delta_encoding = static_cast<bool>(juse_delta_encoding);
+ if (jfilter_policy_handle > 0) {
+ std::shared_ptr<rocksdb::FilterPolicy> *pFilterPolicy =
+ reinterpret_cast<std::shared_ptr<rocksdb::FilterPolicy> *>(
+ jfilter_policy_handle);
+ options.filter_policy = *pFilterPolicy;
+ }
+ options.whole_key_filtering = static_cast<bool>(jwhole_key_filtering);
+ options.verify_compression = static_cast<bool>(jverify_compression);
+ options.read_amp_bytes_per_bit = static_cast<uint32_t>(jread_amp_bytes_per_bit);
+ options.format_version = static_cast<uint32_t>(jformat_version);
+ options.enable_index_compression = static_cast<bool>(jenable_index_compression);
+ options.block_align = static_cast<bool>(jblock_align);
+ // The returned raw TableFactory pointer is owned by the Java side.
  return reinterpret_cast<jlong>(rocksdb::NewBlockBasedTableFactory(options));
}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// org.rocksdb.AbstractTableFilter.
+
+#include <jni.h>
+#include <memory>
+
+#include "include/org_rocksdb_AbstractTableFilter.h"
+#include "rocksjni/table_filter_jnicallback.h"
+
+/*
+ * Class: org_rocksdb_AbstractTableFilter
+ * Method: createNewTableFilter
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_AbstractTableFilter_createNewTableFilter(
+ JNIEnv* env, jobject jtable_filter) {
+ // Wrap the Java AbstractTableFilter in a C++ callback; the returned native
+ // handle is owned by the Java side (presumably freed via its dispose path —
+ // confirm in AbstractTableFilter.java).
+ auto* table_filter_jnicallback =
+ new rocksdb::TableFilterJniCallback(env, jtable_filter);
+ return reinterpret_cast<jlong>(table_filter_jnicallback);
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// rocksdb::TableFilter.
+
+#include "rocksjni/table_filter_jnicallback.h"
+#include "rocksjni/portal.h"
+
+namespace rocksdb {
+TableFilterJniCallback::TableFilterJniCallback(
+ JNIEnv* env, jobject jtable_filter)
+ : JniCallback(env, jtable_filter) {
+ // Cache the methodID of the Java filter method up front; if the lookup
+ // fails a Java exception is already pending, so just bail out.
+ m_jfilter_methodid =
+ AbstractTableFilterJni::getFilterMethod(env);
+ if(m_jfilter_methodid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+
+ // create the function reference
+ /*
+ Note the JNI ENV must be obtained/release
+ on each call to the function itself as
+ it may be called from multiple threads
+ */
+ m_table_filter_function = [this](const rocksdb::TableProperties& table_properties) {
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* thread_env = getJniEnv(&attached_thread);
+ assert(thread_env != nullptr);
+
+ // create a Java TableProperties object
+ jobject jtable_properties = TablePropertiesJni::fromCppTableProperties(thread_env, table_properties);
+ if (jtable_properties == nullptr) {
+ // exception thrown from fromCppTableProperties; treat as "do not read
+ // the table" by returning false.
+ thread_env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return false;
+ }
+
+ jboolean result = thread_env->CallBooleanMethod(m_jcallback_obj, m_jfilter_methodid, jtable_properties);
+ if (thread_env->ExceptionCheck()) {
+ // exception thrown from CallBooleanMethod; also returns false.
+ thread_env->DeleteLocalRef(jtable_properties);
+ thread_env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return false;
+ }
+
+ // ok... cleanup and then return
+ releaseJniEnv(attached_thread);
+ return static_cast<bool>(result);
+ };
+}
+
+// Returns the std::function wrapper around the Java filter callback —
+// presumably installed on ReadOptions::table_filter by the caller (confirm).
+std::function<bool(const rocksdb::TableProperties&)> TableFilterJniCallback::GetTableFilterFunction() {
+ return m_table_filter_function;
+}
+
+} // namespace rocksdb
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// rocksdb::TableFilter.
+
+#ifndef JAVA_ROCKSJNI_TABLE_FILTER_JNICALLBACK_H_
+#define JAVA_ROCKSJNI_TABLE_FILTER_JNICALLBACK_H_
+
+#include <jni.h>
+#include <functional>
+#include <memory>
+
+#include "rocksdb/table_properties.h"
+#include "rocksjni/jnicallback.h"
+
+namespace rocksdb {
+
+// Bridges a Java org.rocksdb.AbstractTableFilter into the std::function
+// form consumed by the C++ side. Thread-attach/detach is handled per call.
+class TableFilterJniCallback : public JniCallback {
+ public:
+ TableFilterJniCallback(
+ JNIEnv* env, jobject jtable_filter);
+ std::function<bool(const rocksdb::TableProperties&)> GetTableFilterFunction();
+
+ private:
+ // Cached methodID of the Java-side filter method.
+ jmethodID m_jfilter_methodid;
+ // Lambda capturing `this`; valid only while this callback object lives.
+ std::function<bool(const rocksdb::TableProperties&)> m_table_filter_function;
+};
+
+} //namespace rocksdb
+
+#endif // JAVA_ROCKSJNI_TABLE_FILTER_JNICALLBACK_H_
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling c++ rocksdb::ThreadStatus methods from Java side.
+
+#include <jni.h>
+
+#include "portal.h"
+#include "include/org_rocksdb_ThreadStatus.h"
+#include "rocksdb/thread_status.h"
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: getThreadTypeName
+ * Signature: (B)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ThreadStatus_getThreadTypeName(
+ JNIEnv* env, jclass, jbyte jthread_type_value) {
+ // Translate the Java thread-type byte to the C++ enum, then return its
+ // human-readable name as a Java String (copied; the local std::string dies).
+ auto name = rocksdb::ThreadStatus::GetThreadTypeName(
+ rocksdb::ThreadTypeJni::toCppThreadType(jthread_type_value));
+ return rocksdb::JniUtil::toJavaString(env, &name, true);
+}
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: getOperationName
+ * Signature: (B)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ThreadStatus_getOperationName(
+ JNIEnv* env, jclass, jbyte joperation_type_value) {
+ // Human-readable name for an operation-type byte coming from Java.
+ auto name = rocksdb::ThreadStatus::GetOperationName(
+ rocksdb::OperationTypeJni::toCppOperationType(joperation_type_value));
+ return rocksdb::JniUtil::toJavaString(env, &name, true);
+}
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: microsToStringNative
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ThreadStatus_microsToStringNative(
+ JNIEnv* env, jclass, jlong jmicros) {
+ // Format a microsecond duration via the C++ helper and hand it to Java.
+ auto str =
+ rocksdb::ThreadStatus::MicrosToString(static_cast<uint64_t>(jmicros));
+ return rocksdb::JniUtil::toJavaString(env, &str, true);
+}
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: getOperationStageName
+ * Signature: (B)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ThreadStatus_getOperationStageName(
+ JNIEnv* env, jclass, jbyte joperation_stage_value) {
+ // Human-readable name for an operation-stage byte coming from Java.
+ auto name = rocksdb::ThreadStatus::GetOperationStageName(
+ rocksdb::OperationStageJni::toCppOperationStage(joperation_stage_value));
+ return rocksdb::JniUtil::toJavaString(env, &name, true);
+}
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: getOperationPropertyName
+ * Signature: (BI)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ThreadStatus_getOperationPropertyName(
+ JNIEnv* env, jclass, jbyte joperation_type_value, jint jindex) {
+ // Name of the jindex-th property of the given operation type.
+ auto name = rocksdb::ThreadStatus::GetOperationPropertyName(
+ rocksdb::OperationTypeJni::toCppOperationType(joperation_type_value),
+ static_cast<int>(jindex));
+ return rocksdb::JniUtil::toJavaString(env, &name, true);
+}
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: interpretOperationProperties
+ * Signature: (B[J)Ljava/util/Map;
+ */
+jobject Java_org_rocksdb_ThreadStatus_interpretOperationProperties(
+ JNIEnv* env, jclass, jbyte joperation_type_value,
+ jlongArray joperation_properties) {
+
+ //convert joperation_properties: copy the jlong elements into a uint64_t
+ //buffer, releasing the Java array without write-back (JNI_ABORT).
+ // NOTE(review): joperation_properties is not null-checked before
+ // GetArrayLength — confirm the Java caller never passes null.
+ const jsize len = env->GetArrayLength(joperation_properties);
+ const std::unique_ptr<uint64_t[]> op_properties(new uint64_t[len]);
+ jlong* jop = env->GetLongArrayElements(joperation_properties, nullptr);
+ if (jop == nullptr) {
+ // exception thrown: OutOfMemoryError
+ return nullptr;
+ }
+ for (jsize i = 0; i < len; i++) {
+ op_properties[i] = static_cast<uint64_t>(jop[i]);
+ }
+ env->ReleaseLongArrayElements(joperation_properties, jop, JNI_ABORT);
+
+ // call the function
+ auto result = rocksdb::ThreadStatus::InterpretOperationProperties(
+ rocksdb::OperationTypeJni::toCppOperationType(joperation_type_value),
+ op_properties.get());
+ // Convert the resulting std::map into a java.util.Map; on failure the
+ // pending Java exception propagates to the caller.
+ jobject jresult = rocksdb::HashMapJni::fromCppMap(env, &result);
+ if (env->ExceptionCheck()) {
+ // exception occurred
+ return nullptr;
+ }
+
+ return jresult;
+}
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: getStateName
+ * Signature: (B)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ThreadStatus_getStateName(
+ JNIEnv* env, jclass, jbyte jstate_type_value) {
+ // Human-readable name for a state-type byte coming from Java.
+ auto name = rocksdb::ThreadStatus::GetStateName(
+ rocksdb::StateTypeJni::toCppStateType(jstate_type_value));
+ return rocksdb::JniUtil::toJavaString(env, &name, true);
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// rocksdb::TraceWriter.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_AbstractTraceWriter.h"
+#include "rocksjni/trace_writer_jnicallback.h"
+
+/*
+ * Class: org_rocksdb_AbstractTraceWriter
+ * Method: createNewTraceWriter
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_AbstractTraceWriter_createNewTraceWriter(
+ JNIEnv* env, jobject jobj) {
+ // Wrap the Java AbstractTraceWriter in a C++ TraceWriter callback; the
+ // returned native handle is owned (and later disposed) by the Java side.
+ auto* trace_writer = new rocksdb::TraceWriterJniCallback(env, jobj);
+ return reinterpret_cast<jlong>(trace_writer);
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// rocksdb::TraceWriter.
+
+#include "rocksjni/trace_writer_jnicallback.h"
+#include "rocksjni/portal.h"
+
+namespace rocksdb {
+TraceWriterJniCallback::TraceWriterJniCallback(
+ JNIEnv* env, jobject jtrace_writer)
+ : JniCallback(env, jtrace_writer) {
+ // Cache the methodIDs of the three Java proxy methods up front; on any
+ // lookup failure a Java exception is already pending, so bail out and
+ // leave the remaining methodIDs null.
+ m_jwrite_proxy_methodid =
+ AbstractTraceWriterJni::getWriteProxyMethodId(env);
+ if(m_jwrite_proxy_methodid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+
+ m_jclose_writer_proxy_methodid =
+ AbstractTraceWriterJni::getCloseWriterProxyMethodId(env);
+ if(m_jclose_writer_proxy_methodid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+
+ m_jget_file_size_methodid =
+ AbstractTraceWriterJni::getGetFileSizeMethodId(env);
+ if(m_jget_file_size_methodid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+}
+
+// Forwards a trace record to the Java writeProxy and decodes the packed
+// Status (code in the high byte, sub-code in the low byte) it returns.
+Status TraceWriterJniCallback::Write(const Slice& data) {
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ if (env == nullptr) {
+ return Status::IOError("Unable to attach JNI Environment");
+ }
+
+ // NOTE(review): the Slice address is passed through the varargs slot of a
+ // method whose Java signature takes a long — relies on pointer-to-jlong
+ // width; confirm against AbstractTraceWriter#writeProxy(long).
+ jshort jstatus = env->CallShortMethod(m_jcallback_obj,
+ m_jwrite_proxy_methodid,
+ &data);
+
+ if(env->ExceptionCheck()) {
+ // exception thrown from CallShortMethod
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return Status::IOError("Unable to call AbstractTraceWriter#writeProxy(long)");
+ }
+
+ // unpack status code and status sub-code from jstatus
+ jbyte jcode_value = (jstatus >> 8) & 0xFF;
+ jbyte jsub_code_value = jstatus & 0xFF;
+ std::unique_ptr<Status> s = StatusJni::toCppStatus(jcode_value, jsub_code_value);
+
+ releaseJniEnv(attached_thread);
+
+ return Status(*s);
+}
+
+// Invokes the Java closeWriterProxy and decodes the packed Status
+// (code in the high byte, sub-code in the low byte) it returns.
+Status TraceWriterJniCallback::Close() {
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ if (env == nullptr) {
+ return Status::IOError("Unable to attach JNI Environment");
+ }
+
+ jshort jstatus = env->CallShortMethod(m_jcallback_obj,
+ m_jclose_writer_proxy_methodid);
+
+ if(env->ExceptionCheck()) {
+ // exception thrown from CallShortMethod
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return Status::IOError("Unable to call AbstractTraceWriter#closeWriterProxy()");
+ }
+
+ // unpack status code and status sub-code from jstatus
+ jbyte code_value = (jstatus >> 8) & 0xFF;
+ jbyte sub_code_value = jstatus & 0xFF;
+ std::unique_ptr<Status> s = StatusJni::toCppStatus(code_value, sub_code_value);
+
+ releaseJniEnv(attached_thread);
+
+ return Status(*s);
+}
+
+// Queries the Java callback for the current trace-file size.
+// Returns 0 when the JNI env cannot be attached or the Java call throws —
+// callers cannot distinguish that from a genuinely empty file.
+uint64_t TraceWriterJniCallback::GetFileSize() {
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ if (env == nullptr) {
+ return 0;
+ }
+
+ jlong jfile_size = env->CallLongMethod(m_jcallback_obj,
+ m_jget_file_size_methodid);
+
+ if(env->ExceptionCheck()) {
+ // exception thrown from CallLongMethod
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return 0;
+ }
+
+ releaseJniEnv(attached_thread);
+
+ return static_cast<uint64_t>(jfile_size);
+}
+
+} // namespace rocksdb
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// rocksdb::TraceWriter.
+
+#ifndef JAVA_ROCKSJNI_TRACE_WRITER_JNICALLBACK_H_
+#define JAVA_ROCKSJNI_TRACE_WRITER_JNICALLBACK_H_
+
+#include <jni.h>
+#include <memory>
+
+#include "rocksdb/trace_reader_writer.h"
+#include "rocksjni/jnicallback.h"
+
+namespace rocksdb {
+
+// Bridges a Java org.rocksdb.AbstractTraceWriter into a C++ TraceWriter.
+// Each virtual override attaches/detaches the calling thread's JNI env.
+class TraceWriterJniCallback : public JniCallback, public TraceWriter {
+ public:
+ TraceWriterJniCallback(
+ JNIEnv* env, jobject jtrace_writer);
+ virtual Status Write(const Slice& data);
+ virtual Status Close();
+ virtual uint64_t GetFileSize();
+
+ private:
+ // Cached methodIDs of the Java-side proxy methods (looked up once in the
+ // constructor; may be null if the lookup failed).
+ jmethodID m_jwrite_proxy_methodid;
+ jmethodID m_jclose_writer_proxy_methodid;
+ jmethodID m_jget_file_size_methodid;
+};
+
+} //namespace rocksdb
+
+#endif // JAVA_ROCKSJNI_TRACE_WRITER_JNICALLBACK_H_
* Signature: (JJLjava/lang/String;)J
*/
jlong Java_org_rocksdb_TransactionDB_open__JJLjava_lang_String_2(
- JNIEnv* env, jclass /*jcls*/, jlong joptions_handle,
+ JNIEnv* env, jclass, jlong joptions_handle,
jlong jtxn_db_options_handle, jstring jdb_path) {
auto* options = reinterpret_cast<rocksdb::Options*>(joptions_handle);
auto* txn_db_options =
* Signature: (JJLjava/lang/String;[[B[J)[J
*/
jlongArray Java_org_rocksdb_TransactionDB_open__JJLjava_lang_String_2_3_3B_3J(
- JNIEnv* env, jclass /*jcls*/, jlong jdb_options_handle,
+ JNIEnv* env, jclass, jlong jdb_options_handle,
jlong jtxn_db_options_handle, jstring jdb_path, jobjectArray jcolumn_names,
jlongArray jcolumn_options_handles) {
const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
}
}
+/*
+ * Class: org_rocksdb_TransactionDB
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_TransactionDB_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ // Destroys the native TransactionDB owned by the Java object's handle.
+ auto* txn_db = reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
+ assert(txn_db != nullptr);
+ delete txn_db;
+}
+
+/*
+ * Class: org_rocksdb_TransactionDB
+ * Method: closeDatabase
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_TransactionDB_closeDatabase(
+ JNIEnv* env, jclass, jlong jhandle) {
+ // Closes the database without deleting the native object (that is done by
+ // disposeInternal). A failed Close() surfaces as a Java RocksDBException —
+ // presumably ThrowNew is a no-op for an ok status; confirm in portal.h.
+ auto* txn_db = reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
+ assert(txn_db != nullptr);
+ rocksdb::Status s = txn_db->Close();
+ rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
/*
* Class: org_rocksdb_TransactionDB
* Method: beginTransaction
* Signature: (JJ)J
*/
jlong Java_org_rocksdb_TransactionDB_beginTransaction__JJ(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jwrite_options_handle) {
+ JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle) {
auto* txn_db = reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
auto* write_options =
reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
* Signature: (JJJ)J
*/
jlong Java_org_rocksdb_TransactionDB_beginTransaction__JJJ(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jwrite_options_handle, jlong jtxn_options_handle) {
+ JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle,
+ jlong jtxn_options_handle) {
auto* txn_db = reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
auto* write_options =
reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
* Signature: (JJJ)J
*/
jlong Java_org_rocksdb_TransactionDB_beginTransaction_1withOld__JJJ(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jwrite_options_handle, jlong jold_txn_handle) {
+ JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle,
+ jlong jold_txn_handle) {
auto* txn_db = reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
auto* write_options =
reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
* Signature: (JJJJ)J
*/
jlong Java_org_rocksdb_TransactionDB_beginTransaction_1withOld__JJJJ(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jlong jwrite_options_handle, jlong jtxn_options_handle,
- jlong jold_txn_handle) {
+ JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle,
+ jlong jtxn_options_handle, jlong jold_txn_handle) {
auto* txn_db = reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
auto* write_options =
reinterpret_cast<rocksdb::WriteOptions*>(jwrite_options_handle);
* Method: getTransactionByName
* Signature: (JLjava/lang/String;)J
*/
-jlong Java_org_rocksdb_TransactionDB_getTransactionByName(JNIEnv* env,
- jobject /*jobj*/,
- jlong jhandle,
- jstring jname) {
+jlong Java_org_rocksdb_TransactionDB_getTransactionByName(
+ JNIEnv* env, jobject, jlong jhandle, jstring jname) {
auto* txn_db = reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
const char* name = env->GetStringUTFChars(jname, nullptr);
if (name == nullptr) {
* Signature: (J)[J
*/
jlongArray Java_org_rocksdb_TransactionDB_getAllPreparedTransactions(
- JNIEnv* env, jobject /*jobj*/, jlong jhandle) {
+ JNIEnv* env, jobject, jlong jhandle) {
auto* txn_db = reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
std::vector<rocksdb::Transaction*> txns;
txn_db->GetAllPreparedTransactions(&txns);
* Method: getLockStatusData
* Signature: (J)Ljava/util/Map;
*/
-jobject Java_org_rocksdb_TransactionDB_getLockStatusData(JNIEnv* env,
- jobject /*jobj*/,
- jlong jhandle) {
+jobject Java_org_rocksdb_TransactionDB_getLockStatusData(
+ JNIEnv* env, jobject, jlong jhandle) {
auto* txn_db = reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
const std::unordered_multimap<uint32_t, rocksdb::KeyLockInfo>
lock_status_data = txn_db->GetLockStatusData();
return nullptr;
}
- const rocksdb::HashMapJni::FnMapKV<const int32_t, const rocksdb::KeyLockInfo>
+ const rocksdb::HashMapJni::FnMapKV<const int32_t, const rocksdb::KeyLockInfo, jobject, jobject>
fn_map_kv =
[env](
const std::pair<const int32_t, const rocksdb::KeyLockInfo>&
* Signature: (JI)V
*/
void Java_org_rocksdb_TransactionDB_setDeadlockInfoBufferSize(
- JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
- jint jdeadlock_info_buffer_size) {
+ JNIEnv*, jobject, jlong jhandle, jint jdeadlock_info_buffer_size) {
auto* txn_db = reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
txn_db->SetDeadlockInfoBufferSize(jdeadlock_info_buffer_size);
}
-
-/*
- * Class: org_rocksdb_TransactionDB
- * Method: disposeInternal
- * Signature: (J)V
- */
-void Java_org_rocksdb_TransactionDB_disposeInternal(JNIEnv* /*env*/,
- jobject /*jobj*/,
- jlong jhandle) {
- delete reinterpret_cast<rocksdb::TransactionDB*>(jhandle);
-}
* Method: open
* Signature: (JLjava/lang/String;IZ)J
*/
-jlong Java_org_rocksdb_TtlDB_open(JNIEnv* env, jclass /*jcls*/,
- jlong joptions_handle, jstring jdb_path,
- jint jttl, jboolean jread_only) {
+jlong Java_org_rocksdb_TtlDB_open(
+ JNIEnv* env, jclass, jlong joptions_handle, jstring jdb_path, jint jttl,
+ jboolean jread_only) {
const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
if (db_path == nullptr) {
// exception thrown: OutOfMemoryError
* Method: openCF
* Signature: (JLjava/lang/String;[[B[J[IZ)[J
*/
-jlongArray Java_org_rocksdb_TtlDB_openCF(JNIEnv* env, jclass /*jcls*/,
- jlong jopt_handle, jstring jdb_path,
- jobjectArray jcolumn_names,
- jlongArray jcolumn_options,
- jintArray jttls, jboolean jread_only) {
+jlongArray Java_org_rocksdb_TtlDB_openCF(
+ JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path,
+ jobjectArray jcolumn_names, jlongArray jcolumn_options,
+ jintArray jttls, jboolean jread_only) {
const char* db_path = env->GetStringUTFChars(jdb_path, nullptr);
if (db_path == nullptr) {
// exception thrown: OutOfMemoryError
}
}
+/*
+ * Class: org_rocksdb_TtlDB
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_TtlDB_disposeInternal(
+ JNIEnv*, jobject, jlong jhandle) {
+ auto* ttl_db = reinterpret_cast<rocksdb::DBWithTTL*>(jhandle);
+ assert(ttl_db != nullptr);
+ delete ttl_db;
+}
+
+/*
+ * Class: org_rocksdb_TtlDB
+ * Method: closeDatabase
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_TtlDB_closeDatabase(
+ JNIEnv* /* env */, jclass, jlong /* jhandle */) {
+ //auto* ttl_db = reinterpret_cast<rocksdb::DBWithTTL*>(jhandle);
+ //assert(ttl_db != nullptr);
+ //rocksdb::Status s = ttl_db->Close();
+ //rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+
+ //TODO(AR) this is disabled until https://github.com/facebook/rocksdb/issues/4818 is resolved!
+}
+
/*
* Class: org_rocksdb_TtlDB
* Method: createColumnFamilyWithTtl
* Signature: (JLorg/rocksdb/ColumnFamilyDescriptor;[BJI)J;
*/
jlong Java_org_rocksdb_TtlDB_createColumnFamilyWithTtl(
- JNIEnv* env, jobject /*jobj*/, jlong jdb_handle, jbyteArray jcolumn_name,
+ JNIEnv* env, jobject, jlong jdb_handle, jbyteArray jcolumn_name,
jlong jcolumn_options, jint jttl) {
jbyte* cfname = env->GetByteArrayElements(jcolumn_name, nullptr);
if (cfname == nullptr) {
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// rocksdb::WalFilter.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_AbstractWalFilter.h"
+#include "rocksjni/wal_filter_jnicallback.h"
+
+/*
+ * Class: org_rocksdb_AbstractWalFilter
+ * Method: createNewWalFilter
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_AbstractWalFilter_createNewWalFilter(
+ JNIEnv* env, jobject jobj) {
+ auto* wal_filter = new rocksdb::WalFilterJniCallback(env, jobj);
+ return reinterpret_cast<jlong>(wal_filter);
+}
\ No newline at end of file
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// rocksdb::WalFilter.
+
+#include "rocksjni/wal_filter_jnicallback.h"
+#include "rocksjni/portal.h"
+
+namespace rocksdb {
+WalFilterJniCallback::WalFilterJniCallback(
+ JNIEnv* env, jobject jwal_filter)
+ : JniCallback(env, jwal_filter) {
+  // Note: The name of a WalFilter will not change during its lifetime,
+ // so we cache it in a global var
+ jmethodID jname_mid = AbstractWalFilterJni::getNameMethodId(env);
+ if(jname_mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+ jstring jname = (jstring)env->CallObjectMethod(m_jcallback_obj, jname_mid);
+ if(env->ExceptionCheck()) {
+ // exception thrown
+ return;
+ }
+ jboolean has_exception = JNI_FALSE;
+ m_name = JniUtil::copyString(env, jname,
+ &has_exception); // also releases jname
+ if (has_exception == JNI_TRUE) {
+ // exception thrown
+ return;
+ }
+
+ m_column_family_log_number_map_mid =
+ AbstractWalFilterJni::getColumnFamilyLogNumberMapMethodId(env);
+ if(m_column_family_log_number_map_mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+
+ m_log_record_found_proxy_mid =
+ AbstractWalFilterJni::getLogRecordFoundProxyMethodId(env);
+ if(m_log_record_found_proxy_mid == nullptr) {
+ // exception thrown: NoSuchMethodException or OutOfMemoryError
+ return;
+ }
+}
+
+void WalFilterJniCallback::ColumnFamilyLogNumberMap(
+ const std::map<uint32_t, uint64_t>& cf_lognumber_map,
+ const std::map<std::string, uint32_t>& cf_name_id_map) {
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ if (env == nullptr) {
+ return;
+ }
+
+ jobject jcf_lognumber_map =
+ rocksdb::HashMapJni::fromCppMap(env, &cf_lognumber_map);
+ if (jcf_lognumber_map == nullptr) {
+ // exception occurred
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return;
+ }
+
+ jobject jcf_name_id_map =
+ rocksdb::HashMapJni::fromCppMap(env, &cf_name_id_map);
+ if (jcf_name_id_map == nullptr) {
+ // exception occurred
+ env->ExceptionDescribe(); // print out exception to stderr
+ env->DeleteLocalRef(jcf_lognumber_map);
+ releaseJniEnv(attached_thread);
+ return;
+ }
+
+ env->CallVoidMethod(m_jcallback_obj,
+ m_column_family_log_number_map_mid,
+ jcf_lognumber_map,
+ jcf_name_id_map);
+
+ env->DeleteLocalRef(jcf_lognumber_map);
+ env->DeleteLocalRef(jcf_name_id_map);
+
+ if(env->ExceptionCheck()) {
+ // exception thrown from CallVoidMethod
+ env->ExceptionDescribe(); // print out exception to stderr
+ }
+
+ releaseJniEnv(attached_thread);
+}
+
+ WalFilter::WalProcessingOption WalFilterJniCallback::LogRecordFound(
+ unsigned long long log_number, const std::string& log_file_name,
+ const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed) {
+ jboolean attached_thread = JNI_FALSE;
+ JNIEnv* env = getJniEnv(&attached_thread);
+ if (env == nullptr) {
+ return WalFilter::WalProcessingOption::kCorruptedRecord;
+ }
+
+ jstring jlog_file_name = JniUtil::toJavaString(env, &log_file_name);
+ if (jlog_file_name == nullptr) {
+    // exception occurred
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return WalFilter::WalProcessingOption::kCorruptedRecord;
+ }
+
+ jshort jlog_record_found_result = env->CallShortMethod(m_jcallback_obj,
+ m_log_record_found_proxy_mid,
+ static_cast<jlong>(log_number),
+ jlog_file_name,
+ reinterpret_cast<jlong>(&batch),
+ reinterpret_cast<jlong>(new_batch));
+
+ env->DeleteLocalRef(jlog_file_name);
+
+ if (env->ExceptionCheck()) {
+ // exception thrown from CallShortMethod
+ env->ExceptionDescribe(); // print out exception to stderr
+ releaseJniEnv(attached_thread);
+ return WalFilter::WalProcessingOption::kCorruptedRecord;
+ }
+
+ // unpack WalProcessingOption and batch_changed from jlog_record_found_result
+ jbyte jwal_processing_option_value = (jlog_record_found_result >> 8) & 0xFF;
+ jbyte jbatch_changed_value = jlog_record_found_result & 0xFF;
+
+ releaseJniEnv(attached_thread);
+
+ *batch_changed = jbatch_changed_value == JNI_TRUE;
+
+ return WalProcessingOptionJni::toCppWalProcessingOption(
+ jwal_processing_option_value);
+}
+
+const char* WalFilterJniCallback::Name() const {
+ return m_name.get();
+}
+
+} // namespace rocksdb
\ No newline at end of file
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// rocksdb::WalFilter.
+
+#ifndef JAVA_ROCKSJNI_WAL_FILTER_JNICALLBACK_H_
+#define JAVA_ROCKSJNI_WAL_FILTER_JNICALLBACK_H_
+
+#include <jni.h>
+#include <map>
+#include <memory>
+#include <string>
+
+#include "rocksdb/wal_filter.h"
+#include "rocksjni/jnicallback.h"
+
+namespace rocksdb {
+
+class WalFilterJniCallback : public JniCallback, public WalFilter {
+ public:
+ WalFilterJniCallback(
+ JNIEnv* env, jobject jwal_filter);
+ virtual void ColumnFamilyLogNumberMap(
+ const std::map<uint32_t, uint64_t>& cf_lognumber_map,
+ const std::map<std::string, uint32_t>& cf_name_id_map);
+ virtual WalFilter::WalProcessingOption LogRecordFound(
+ unsigned long long log_number, const std::string& log_file_name,
+ const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed);
+ virtual const char* Name() const;
+
+ private:
+ std::unique_ptr<const char[]> m_name;
+ jmethodID m_column_family_log_number_map_mid;
+ jmethodID m_log_record_found_proxy_mid;
+};
+
+} //namespace rocksdb
+
+#endif // JAVA_ROCKSJNI_WAL_FILTER_JNICALLBACK_H_
* A flag indicating whether the current {@code AbstractNativeReference} is
* responsible to free the underlying C++ object
*/
- private final AtomicBoolean owningHandle_;
+ protected final AtomicBoolean owningHandle_;
protected AbstractImmutableNativeReference(final boolean owningHandle) {
this.owningHandle_ = new AtomicBoolean(owningHandle);
--- /dev/null
+package org.rocksdb;
+
+import java.util.*;
+
+public abstract class AbstractMutableOptions {
+
+ protected static final String KEY_VALUE_PAIR_SEPARATOR = ";";
+ protected static final char KEY_VALUE_SEPARATOR = '=';
+ static final String INT_ARRAY_INT_SEPARATOR = ",";
+
+ protected final String[] keys;
+ private final String[] values;
+
+ /**
+ * User must use builder pattern, or parser.
+ *
+ * @param keys the keys
+ * @param values the values
+ */
+ protected AbstractMutableOptions(final String[] keys, final String[] values) {
+ this.keys = keys;
+ this.values = values;
+ }
+
+ String[] getKeys() {
+ return keys;
+ }
+
+ String[] getValues() {
+ return values;
+ }
+
+ /**
+ * Returns a string representation of MutableOptions which
+ * is suitable for consumption by {@code #parse(String)}.
+ *
+ * @return String representation of MutableOptions
+ */
+ @Override
+ public String toString() {
+ final StringBuilder buffer = new StringBuilder();
+ for(int i = 0; i < keys.length; i++) {
+ buffer
+ .append(keys[i])
+ .append(KEY_VALUE_SEPARATOR)
+ .append(values[i]);
+
+ if(i + 1 < keys.length) {
+ buffer.append(KEY_VALUE_PAIR_SEPARATOR);
+ }
+ }
+ return buffer.toString();
+ }
+
+ public static abstract class AbstractMutableOptionsBuilder<
+ T extends AbstractMutableOptions,
+ U extends AbstractMutableOptionsBuilder<T, U, K>,
+ K extends MutableOptionKey> {
+
+ private final Map<K, MutableOptionValue<?>> options = new LinkedHashMap<>();
+
+ protected abstract U self();
+
+ /**
+ * Get all of the possible keys
+ *
+ * @return A map of all keys, indexed by name.
+ */
+ protected abstract Map<String, K> allKeys();
+
+ /**
+ * Construct a sub-class instance of {@link AbstractMutableOptions}.
+ *
+ * @param keys the keys
+ * @param values the values
+ *
+ * @return an instance of the options.
+ */
+ protected abstract T build(final String[] keys, final String[] values);
+
+ public T build() {
+ final String keys[] = new String[options.size()];
+ final String values[] = new String[options.size()];
+
+ int i = 0;
+ for (final Map.Entry<K, MutableOptionValue<?>> option : options.entrySet()) {
+ keys[i] = option.getKey().name();
+ values[i] = option.getValue().asString();
+ i++;
+ }
+
+ return build(keys, values);
+ }
+
+ protected U setDouble(
+ final K key, final double value) {
+ if (key.getValueType() != MutableOptionKey.ValueType.DOUBLE) {
+ throw new IllegalArgumentException(
+ key + " does not accept a double value");
+ }
+ options.put(key, MutableOptionValue.fromDouble(value));
+ return self();
+ }
+
+ protected double getDouble(final K key)
+ throws NoSuchElementException, NumberFormatException {
+ final MutableOptionValue<?> value = options.get(key);
+ if(value == null) {
+ throw new NoSuchElementException(key.name() + " has not been set");
+ }
+ return value.asDouble();
+ }
+
+ protected U setLong(
+ final K key, final long value) {
+ if(key.getValueType() != MutableOptionKey.ValueType.LONG) {
+ throw new IllegalArgumentException(
+ key + " does not accept a long value");
+ }
+ options.put(key, MutableOptionValue.fromLong(value));
+ return self();
+ }
+
+ protected long getLong(final K key)
+ throws NoSuchElementException, NumberFormatException {
+ final MutableOptionValue<?> value = options.get(key);
+ if(value == null) {
+ throw new NoSuchElementException(key.name() + " has not been set");
+ }
+ return value.asLong();
+ }
+
+ protected U setInt(
+ final K key, final int value) {
+ if(key.getValueType() != MutableOptionKey.ValueType.INT) {
+ throw new IllegalArgumentException(
+ key + " does not accept an integer value");
+ }
+ options.put(key, MutableOptionValue.fromInt(value));
+ return self();
+ }
+
+ protected int getInt(final K key)
+ throws NoSuchElementException, NumberFormatException {
+ final MutableOptionValue<?> value = options.get(key);
+ if(value == null) {
+ throw new NoSuchElementException(key.name() + " has not been set");
+ }
+ return value.asInt();
+ }
+
+ protected U setBoolean(
+ final K key, final boolean value) {
+ if(key.getValueType() != MutableOptionKey.ValueType.BOOLEAN) {
+ throw new IllegalArgumentException(
+ key + " does not accept a boolean value");
+ }
+ options.put(key, MutableOptionValue.fromBoolean(value));
+ return self();
+ }
+
+ protected boolean getBoolean(final K key)
+ throws NoSuchElementException, NumberFormatException {
+ final MutableOptionValue<?> value = options.get(key);
+ if(value == null) {
+ throw new NoSuchElementException(key.name() + " has not been set");
+ }
+ return value.asBoolean();
+ }
+
+ protected U setIntArray(
+ final K key, final int[] value) {
+ if(key.getValueType() != MutableOptionKey.ValueType.INT_ARRAY) {
+ throw new IllegalArgumentException(
+ key + " does not accept an int array value");
+ }
+ options.put(key, MutableOptionValue.fromIntArray(value));
+ return self();
+ }
+
+ protected int[] getIntArray(final K key)
+ throws NoSuchElementException, NumberFormatException {
+ final MutableOptionValue<?> value = options.get(key);
+ if(value == null) {
+ throw new NoSuchElementException(key.name() + " has not been set");
+ }
+ return value.asIntArray();
+ }
+
+ protected <N extends Enum<N>> U setEnum(
+ final K key, final N value) {
+ if(key.getValueType() != MutableOptionKey.ValueType.ENUM) {
+ throw new IllegalArgumentException(
+ key + " does not accept a Enum value");
+ }
+ options.put(key, MutableOptionValue.fromEnum(value));
+ return self();
+ }
+
+ protected <N extends Enum<N>> N getEnum(final K key)
+ throws NoSuchElementException, NumberFormatException {
+ final MutableOptionValue<?> value = options.get(key);
+ if(value == null) {
+ throw new NoSuchElementException(key.name() + " has not been set");
+ }
+
+ if(!(value instanceof MutableOptionValue.MutableOptionEnumValue)) {
+ throw new NoSuchElementException(key.name() + " is not of Enum type");
+ }
+
+ return ((MutableOptionValue.MutableOptionEnumValue<N>)value).asObject();
+ }
+
+ public U fromString(
+ final String keyStr, final String valueStr)
+ throws IllegalArgumentException {
+ Objects.requireNonNull(keyStr);
+ Objects.requireNonNull(valueStr);
+
+ final K key = allKeys().get(keyStr);
+ switch(key.getValueType()) {
+ case DOUBLE:
+ return setDouble(key, Double.parseDouble(valueStr));
+
+ case LONG:
+ return setLong(key, Long.parseLong(valueStr));
+
+ case INT:
+ return setInt(key, Integer.parseInt(valueStr));
+
+ case BOOLEAN:
+ return setBoolean(key, Boolean.parseBoolean(valueStr));
+
+ case INT_ARRAY:
+ final String[] strInts = valueStr
+ .trim().split(INT_ARRAY_INT_SEPARATOR);
+ if(strInts == null || strInts.length == 0) {
+ throw new IllegalArgumentException(
+ "int array value is not correctly formatted");
+ }
+
+ final int value[] = new int[strInts.length];
+ int i = 0;
+ for(final String strInt : strInts) {
+ value[i++] = Integer.parseInt(strInt);
+ }
+ return setIntArray(key, value);
+ }
+
+ throw new IllegalStateException(
+ key + " has unknown value type: " + key.getValueType());
+ }
+ }
+}
--- /dev/null
+package org.rocksdb;
+
+/**
+ * Base class for Table Filters.
+ */
+public abstract class AbstractTableFilter
+ extends RocksCallbackObject implements TableFilter {
+
+ protected AbstractTableFilter() {
+ super();
+ }
+
+ @Override
+ protected long initializeNative(final long... nativeParameterHandles) {
+ return createNewTableFilter();
+ }
+
+ private native long createNewTableFilter();
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Base class for TraceWriters.
+ */
+public abstract class AbstractTraceWriter
+ extends RocksCallbackObject implements TraceWriter {
+
+ @Override
+ protected long initializeNative(final long... nativeParameterHandles) {
+ return createNewTraceWriter();
+ }
+
+ /**
+ * Called from JNI, proxy for {@link TraceWriter#write(Slice)}.
+ *
+ * @param sliceHandle the native handle of the slice (which we do not own)
+ *
+ * @return short (2 bytes) where the first byte is the
+ * {@link Status.Code#getValue()} and the second byte is the
+ * {@link Status.SubCode#getValue()}.
+ */
+ private short writeProxy(final long sliceHandle) {
+ try {
+ write(new Slice(sliceHandle));
+ return statusToShort(Status.Code.Ok, Status.SubCode.None);
+ } catch (final RocksDBException e) {
+ return statusToShort(e.getStatus());
+ }
+ }
+
+ /**
+ * Called from JNI, proxy for {@link TraceWriter#closeWriter()}.
+ *
+ * @return short (2 bytes) where the first byte is the
+ * {@link Status.Code#getValue()} and the second byte is the
+ * {@link Status.SubCode#getValue()}.
+ */
+ private short closeWriterProxy() {
+ try {
+ closeWriter();
+ return statusToShort(Status.Code.Ok, Status.SubCode.None);
+ } catch (final RocksDBException e) {
+ return statusToShort(e.getStatus());
+ }
+ }
+
+ private static short statusToShort(/*@Nullable*/ final Status status) {
+ final Status.Code code = status != null && status.getCode() != null
+ ? status.getCode()
+ : Status.Code.IOError;
+ final Status.SubCode subCode = status != null && status.getSubCode() != null
+ ? status.getSubCode()
+ : Status.SubCode.None;
+ return statusToShort(code, subCode);
+ }
+
+ private static short statusToShort(final Status.Code code,
+ final Status.SubCode subCode) {
+ short result = (short)(code.getValue() << 8);
+ return (short)(result | subCode.getValue());
+ }
+
+ private native long createNewTraceWriter();
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Base class for WAL Filters.
+ */
+public abstract class AbstractWalFilter
+ extends RocksCallbackObject implements WalFilter {
+
+ @Override
+ protected long initializeNative(final long... nativeParameterHandles) {
+ return createNewWalFilter();
+ }
+
+ /**
+ * Called from JNI, proxy for
+ * {@link WalFilter#logRecordFound(long, String, WriteBatch, WriteBatch)}.
+ *
+ * @param logNumber the log handle.
+ * @param logFileName the log file name
+ * @param batchHandle the native handle of a WriteBatch (which we do not own)
+ * @param newBatchHandle the native handle of a
+ * new WriteBatch (which we do not own)
+ *
+   * @return short (2 bytes) where the first byte is the
+   *     {@link WalFilter.LogRecordFoundResult#walProcessingOption} and the
+   *     second byte is the {@link WalFilter.LogRecordFoundResult#batchChanged}.
+ */
+ private short logRecordFoundProxy(final long logNumber,
+ final String logFileName, final long batchHandle,
+ final long newBatchHandle) {
+ final LogRecordFoundResult logRecordFoundResult = logRecordFound(
+ logNumber, logFileName, new WriteBatch(batchHandle),
+ new WriteBatch(newBatchHandle));
+ return logRecordFoundResultToShort(logRecordFoundResult);
+ }
+
+ private static short logRecordFoundResultToShort(
+ final LogRecordFoundResult logRecordFoundResult) {
+ short result = (short)(logRecordFoundResult.walProcessingOption.getValue() << 8);
+ return (short)(result | (logRecordFoundResult.batchChanged ? 1 : 0));
+ }
+
+ private native long createNewWalFilter();
+}
* @return true if reporting is enabled
*/
boolean reportBgIoStats();
+
+ /**
+ * Non-bottom-level files older than TTL will go through the compaction
+ * process. This needs {@link MutableDBOptionsInterface#maxOpenFiles()} to be
+ * set to -1.
+ *
+ * Enabled only for level compaction for now.
+ *
+ * Default: 0 (disabled)
+ *
+ * Dynamically changeable through
+ * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
+ *
+ * @param ttl the time-to-live.
+ *
+ * @return the reference to the current options.
+ */
+ T setTtl(final long ttl);
+
+ /**
+ * Get the TTL for Non-bottom-level files that will go through the compaction
+ * process.
+ *
+ * See {@link #setTtl(long)}.
+ *
+ * @return the time-to-live.
+ */
+ long ttl();
}
*
* BlockBasedTable is a RocksDB's default SST file format.
*/
+//TODO(AR) should be renamed BlockBasedTableOptions
public class BlockBasedTableConfig extends TableFormatConfig {
public BlockBasedTableConfig() {
- noBlockCache_ = false;
- blockCacheSize_ = 8 * 1024 * 1024;
- blockCacheNumShardBits_ = 0;
- blockCache_ = null;
- blockSize_ = 4 * 1024;
- blockSizeDeviation_ = 10;
- blockRestartInterval_ = 16;
- wholeKeyFiltering_ = true;
- filter_ = null;
- cacheIndexAndFilterBlocks_ = false;
- cacheIndexAndFilterBlocksWithHighPriority_ = false;
- pinL0FilterAndIndexBlocksInCache_ = false;
- partitionFilters_ = false;
- metadataBlockSize_ = 4096;
- pinTopLevelIndexAndFilter_ = true;
- hashIndexAllowCollision_ = true;
- blockCacheCompressedSize_ = 0;
- blockCacheCompressedNumShardBits_ = 0;
- checksumType_ = ChecksumType.kCRC32c;
- indexType_ = IndexType.kBinarySearch;
- formatVersion_ = 0;
+ //TODO(AR) flushBlockPolicyFactory
+ cacheIndexAndFilterBlocks = false;
+ cacheIndexAndFilterBlocksWithHighPriority = false;
+ pinL0FilterAndIndexBlocksInCache = false;
+ pinTopLevelIndexAndFilter = true;
+ indexType = IndexType.kBinarySearch;
+ dataBlockIndexType = DataBlockIndexType.kDataBlockBinarySearch;
+ dataBlockHashTableUtilRatio = 0.75;
+ checksumType = ChecksumType.kCRC32c;
+ noBlockCache = false;
+ blockCache = null;
+ persistentCache = null;
+ blockCacheCompressed = null;
+ blockSize = 4 * 1024;
+ blockSizeDeviation = 10;
+ blockRestartInterval = 16;
+ indexBlockRestartInterval = 1;
+ metadataBlockSize = 4096;
+ partitionFilters = false;
+ useDeltaEncoding = true;
+ filterPolicy = null;
+ wholeKeyFiltering = true;
+ verifyCompression = true;
+ readAmpBytesPerBit = 0;
+ formatVersion = 2;
+ enableIndexCompression = true;
+ blockAlign = false;
+
+ // NOTE: ONLY used if blockCache == null
+ blockCacheSize = 8 * 1024 * 1024;
+ blockCacheNumShardBits = 0;
+
+ // NOTE: ONLY used if blockCacheCompressed == null
+ blockCacheCompressedSize = 0;
+ blockCacheCompressedNumShardBits = 0;
}
/**
- * Disable block cache. If this is set to true,
- * then no block cache should be used, and the block_cache should
- * point to a {@code nullptr} object.
- * Default: false
+ * Indicating if we'd put index/filter blocks to the block cache.
+ * If not specified, each "table reader" object will pre-load index/filter
+ * block during table initialization.
*
- * @param noBlockCache if use block cache
+ * @return if index and filter blocks should be put in block cache.
+ */
+ public boolean cacheIndexAndFilterBlocks() {
+ return cacheIndexAndFilterBlocks;
+ }
+
+ /**
+ * Indicating if we'd put index/filter blocks to the block cache.
+ * If not specified, each "table reader" object will pre-load index/filter
+ * block during table initialization.
+ *
+   * @param cacheIndexAndFilterBlocks if index and filter blocks should be put in block cache.
* @return the reference to the current config.
*/
- public BlockBasedTableConfig setNoBlockCache(final boolean noBlockCache) {
- noBlockCache_ = noBlockCache;
+ public BlockBasedTableConfig setCacheIndexAndFilterBlocks(
+ final boolean cacheIndexAndFilterBlocks) {
+ this.cacheIndexAndFilterBlocks = cacheIndexAndFilterBlocks;
return this;
}
/**
- * @return if block cache is disabled
+ * Indicates if index and filter blocks will be treated as high-priority in the block cache.
+ * See note below about applicability. If not specified, defaults to false.
+ *
+ * @return if index and filter blocks will be treated as high-priority.
*/
- public boolean noBlockCache() {
- return noBlockCache_;
+ public boolean cacheIndexAndFilterBlocksWithHighPriority() {
+ return cacheIndexAndFilterBlocksWithHighPriority;
}
/**
- * Set the amount of cache in bytes that will be used by RocksDB.
- * If cacheSize is non-positive, then cache will not be used.
- * DEFAULT: 8M
+ * If true, cache index and filter blocks with high priority. If set to true,
+ * depending on implementation of block cache, index and filter blocks may be
+ * less likely to be evicted than data blocks.
*
- * @param blockCacheSize block cache size in bytes
+ * @param cacheIndexAndFilterBlocksWithHighPriority if index and filter blocks
+ * will be treated as high-priority.
* @return the reference to the current config.
*/
- public BlockBasedTableConfig setBlockCacheSize(final long blockCacheSize) {
- blockCacheSize_ = blockCacheSize;
+ public BlockBasedTableConfig setCacheIndexAndFilterBlocksWithHighPriority(
+ final boolean cacheIndexAndFilterBlocksWithHighPriority) {
+ this.cacheIndexAndFilterBlocksWithHighPriority = cacheIndexAndFilterBlocksWithHighPriority;
return this;
}
/**
- * @return block cache size in bytes
+ * Indicating if we'd like to pin L0 index/filter blocks to the block cache.
+   * If not specified, defaults to false.
+ *
+ * @return if L0 index and filter blocks should be pinned to the block cache.
*/
- public long blockCacheSize() {
- return blockCacheSize_;
+ public boolean pinL0FilterAndIndexBlocksInCache() {
+ return pinL0FilterAndIndexBlocksInCache;
+ }
+
+ /**
+ * Indicating if we'd like to pin L0 index/filter blocks to the block cache.
+   * If not specified, defaults to false.
+ *
+ * @param pinL0FilterAndIndexBlocksInCache pin blocks in block cache
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setPinL0FilterAndIndexBlocksInCache(
+ final boolean pinL0FilterAndIndexBlocksInCache) {
+ this.pinL0FilterAndIndexBlocksInCache = pinL0FilterAndIndexBlocksInCache;
+ return this;
+ }
+
+ /**
+ * Indicates if top-level index and filter blocks should be pinned.
+ *
+ * @return if top-level index and filter blocks should be pinned.
+ */
+ public boolean pinTopLevelIndexAndFilter() {
+ return pinTopLevelIndexAndFilter;
+ }
+
+ /**
+ * If cacheIndexAndFilterBlocks is true and the below is true, then
+ * the top-level index of partitioned filter and index blocks are stored in
+ * the cache, but a reference is held in the "table reader" object so the
+ * blocks are pinned and only evicted from cache when the table reader is
+ * freed. This is not limited to l0 in LSM tree.
+ *
+ * @param pinTopLevelIndexAndFilter if top-level index and filter blocks should be pinned.
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setPinTopLevelIndexAndFilter(final boolean pinTopLevelIndexAndFilter) {
+ this.pinTopLevelIndexAndFilter = pinTopLevelIndexAndFilter;
+ return this;
+ }
+
+ /**
+ * Get the index type.
+ *
+ * @return the currently set index type
+ */
+ public IndexType indexType() {
+ return indexType;
+ }
+
+ /**
+ * Sets the index type to used with this table.
+ *
+ * @param indexType {@link org.rocksdb.IndexType} value
+ * @return the reference to the current option.
+ */
+ public BlockBasedTableConfig setIndexType(
+ final IndexType indexType) {
+ this.indexType = indexType;
+ return this;
+ }
+
+ /**
+ * Get the data block index type.
+ *
+ * @return the currently set data block index type
+ */
+ public DataBlockIndexType dataBlockIndexType() {
+ return dataBlockIndexType;
+ }
+
+ /**
+ * Sets the data block index type to used with this table.
+ *
+ * @param dataBlockIndexType {@link org.rocksdb.DataBlockIndexType} value
+ * @return the reference to the current option.
+ */
+ public BlockBasedTableConfig setDataBlockIndexType(
+ final DataBlockIndexType dataBlockIndexType) {
+ this.dataBlockIndexType = dataBlockIndexType;
+ return this;
+ }
+
+ /**
+ * Get the #entries/#buckets. It is valid only when {@link #dataBlockIndexType()} is
+ * {@link DataBlockIndexType#kDataBlockBinaryAndHash}.
+ *
+ * @return the #entries/#buckets.
+ */
+ public double dataBlockHashTableUtilRatio() {
+ return dataBlockHashTableUtilRatio;
+ }
+
+ /**
+ * Set the #entries/#buckets. It is valid only when {@link #dataBlockIndexType()} is
+ * {@link DataBlockIndexType#kDataBlockBinaryAndHash}.
+ *
+ * @param dataBlockHashTableUtilRatio #entries/#buckets
+ * @return the reference to the current option.
+ */
+ public BlockBasedTableConfig setDataBlockHashTableUtilRatio(
+ final double dataBlockHashTableUtilRatio) {
+ this.dataBlockHashTableUtilRatio = dataBlockHashTableUtilRatio;
+ return this;
+ }
+
+ /**
+ * Get the checksum type to be used with this table.
+ *
+ * @return the currently set checksum type
+ */
+ public ChecksumType checksumType() {
+ return checksumType;
+ }
+
+ /**
+   * Sets the checksum type to be used with this table.
+ *
+ * @param checksumType {@link org.rocksdb.ChecksumType} value.
+ * @return the reference to the current option.
+ */
+ public BlockBasedTableConfig setChecksumType(
+ final ChecksumType checksumType) {
+ this.checksumType = checksumType;
+ return this;
+ }
+
+ /**
+ * Determine if the block cache is disabled.
+ *
+ * @return if block cache is disabled
+ */
+ public boolean noBlockCache() {
+ return noBlockCache;
+ }
+
+ /**
+ * Disable block cache. If this is set to true,
+ * then no block cache should be used, and the {@link #setBlockCache(Cache)}
+ * should point to a {@code null} object.
+ *
+ * Default: false
+ *
+ * @param noBlockCache if use block cache
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setNoBlockCache(final boolean noBlockCache) {
+ this.noBlockCache = noBlockCache;
+ return this;
}
/**
* {@link org.rocksdb.Cache} instance can be re-used in multiple options
* instances.
*
- * @param cache {@link org.rocksdb.Cache} Cache java instance (e.g. LRUCache).
+ * @param blockCache {@link org.rocksdb.Cache} Cache java instance
+ * (e.g. LRUCache).
+ *
* @return the reference to the current config.
*/
- public BlockBasedTableConfig setBlockCache(final Cache cache) {
- blockCache_ = cache;
+ public BlockBasedTableConfig setBlockCache(final Cache blockCache) {
+ this.blockCache = blockCache;
return this;
}
/**
- * Controls the number of shards for the block cache.
- * This is applied only if cacheSize is set to non-negative.
+ * Use the specified persistent cache.
*
- * @param blockCacheNumShardBits the number of shard bits. The resulting
- * number of shards would be 2 ^ numShardBits. Any negative
- * number means use default settings."
- * @return the reference to the current option.
+ * If {@code !null} use the specified cache for pages read from device,
+ * otherwise no page cache is used.
+ *
+ * @param persistentCache the persistent cache
+ *
+ * @return the reference to the current config.
*/
- public BlockBasedTableConfig setCacheNumShardBits(
- final int blockCacheNumShardBits) {
- blockCacheNumShardBits_ = blockCacheNumShardBits;
+ public BlockBasedTableConfig setPersistentCache(
+ final PersistentCache persistentCache) {
+ this.persistentCache = persistentCache;
return this;
}
/**
- * Returns the number of shard bits used in the block cache.
- * The resulting number of shards would be 2 ^ (returned value).
- * Any negative number means use default settings.
+ * Use the specified cache for compressed blocks.
*
- * @return the number of shard bits used in the block cache.
+ * If {@code null}, RocksDB will not use a compressed block cache.
+ *
+ * Note: though it looks similar to {@link #setBlockCache(Cache)}, RocksDB
+ * doesn't put the same type of object there.
+ *
+ * {@link org.rocksdb.Cache} should not be disposed before options instances
+ * using this cache is disposed.
+ *
+ * {@link org.rocksdb.Cache} instance can be re-used in multiple options
+ * instances.
+ *
+ * @param blockCacheCompressed {@link org.rocksdb.Cache} Cache java instance
+ * (e.g. LRUCache).
+ *
+ * @return the reference to the current config.
*/
- public int cacheNumShardBits() {
- return blockCacheNumShardBits_;
+ public BlockBasedTableConfig setBlockCacheCompressed(
+ final Cache blockCacheCompressed) {
+ this.blockCacheCompressed = blockCacheCompressed;
+ return this;
}
/**
- * Approximate size of user data packed per block. Note that the
+ * Get the approximate size of user data packed per block.
+ *
+ * @return block size in bytes
+ */
+ public long blockSize() {
+ return blockSize;
+ }
+
+ /**
+ * Approximate size of user data packed per block. Note that the
* block size specified here corresponds to uncompressed data. The
* actual size of the unit read from disk may be smaller if
* compression is enabled. This parameter can be changed dynamically.
* @return the reference to the current config.
*/
public BlockBasedTableConfig setBlockSize(final long blockSize) {
- blockSize_ = blockSize;
+ this.blockSize = blockSize;
return this;
}
/**
- * @return block size in bytes
+ * @return the block size deviation.
*/
- public long blockSize() {
- return blockSize_;
+ public int blockSizeDeviation() {
+ return blockSizeDeviation;
}
/**
* This is used to close a block before it reaches the configured
- * 'block_size'. If the percentage of free space in the current block is less
- * than this specified number and adding a new record to the block will
- * exceed the configured block size, then this block will be closed and the
- * new record will be written to the next block.
+ * {@link #blockSize()}. If the percentage of free space in the current block
+ * is less than this specified number and adding a new record to the block
+ * will exceed the configured block size, then this block will be closed and
+ * the new record will be written to the next block.
+ *
* Default is 10.
*
* @param blockSizeDeviation the deviation to block size allowed
*/
public BlockBasedTableConfig setBlockSizeDeviation(
final int blockSizeDeviation) {
- blockSizeDeviation_ = blockSizeDeviation;
+ this.blockSizeDeviation = blockSizeDeviation;
return this;
}
/**
- * @return the hash table ratio.
+ * Get the block restart interval.
+ *
+ * @return block restart interval
*/
- public int blockSizeDeviation() {
- return blockSizeDeviation_;
+ public int blockRestartInterval() {
+ return blockRestartInterval;
}
/**
- * Set block restart interval
+ * Set the block restart interval.
*
* @param restartInterval block restart interval.
* @return the reference to the current config.
*/
public BlockBasedTableConfig setBlockRestartInterval(
final int restartInterval) {
- blockRestartInterval_ = restartInterval;
+ blockRestartInterval = restartInterval;
return this;
}
/**
- * @return block restart interval
+ * Get the index block restart interval.
+ *
+ * @return index block restart interval
*/
- public int blockRestartInterval() {
- return blockRestartInterval_;
+ public int indexBlockRestartInterval() {
+ return indexBlockRestartInterval;
}
/**
- * If true, place whole keys in the filter (not just prefixes).
- * This must generally be true for gets to be efficient.
- * Default: true
+ * Set the index block restart interval.
*
- * @param wholeKeyFiltering if enable whole key filtering
+ * @param restartInterval index block restart interval.
* @return the reference to the current config.
*/
- public BlockBasedTableConfig setWholeKeyFiltering(
- final boolean wholeKeyFiltering) {
- wholeKeyFiltering_ = wholeKeyFiltering;
+ public BlockBasedTableConfig setIndexBlockRestartInterval(
+ final int restartInterval) {
+ indexBlockRestartInterval = restartInterval;
return this;
}
/**
- * @return if whole key filtering is enabled
+ * Get the block size for partitioned metadata.
+ *
+ * @return block size for partitioned metadata.
*/
- public boolean wholeKeyFiltering() {
- return wholeKeyFiltering_;
+ public long metadataBlockSize() {
+ return metadataBlockSize;
+ }
+
+ /**
+ * Set block size for partitioned metadata.
+ *
+ * @param metadataBlockSize Partitioned metadata block size.
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setMetadataBlockSize(
+ final long metadataBlockSize) {
+ this.metadataBlockSize = metadataBlockSize;
+ return this;
+ }
+
+ /**
+ * Indicates if we're using partitioned filters.
+ *
+ * @return if we're using partition filters.
+ */
+ public boolean partitionFilters() {
+ return partitionFilters;
+ }
+
+ /**
+ * Use partitioned full filters for each SST file. This option is incompatible
+ * with block-based filters.
+ *
+ * Defaults to false.
+ *
+ * @param partitionFilters use partition filters.
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setPartitionFilters(final boolean partitionFilters) {
+ this.partitionFilters = partitionFilters;
+ return this;
+ }
+
+ /**
+ * Determine if delta encoding is being used to compress block keys.
+ *
+ * @return true if delta encoding is enabled, false otherwise.
+ */
+ public boolean useDeltaEncoding() {
+ return useDeltaEncoding;
+ }
+
+ /**
+ * Use delta encoding to compress keys in blocks.
+ *
+ * NOTE: {@link ReadOptions#pinData()} requires this option to be disabled.
+ *
+ * Default: true
+ *
+ * @param useDeltaEncoding true to enable delta encoding
+ *
+ * @return the reference to the current config.
+ */
+ public BlockBasedTableConfig setUseDeltaEncoding(
+ final boolean useDeltaEncoding) {
+ this.useDeltaEncoding = useDeltaEncoding;
+ return this;
}
/**
* {@link org.rocksdb.Filter} instance can be re-used in multiple options
* instances.
*
- * @param filter {@link org.rocksdb.Filter} Filter Policy java instance.
+ * @param filterPolicy {@link org.rocksdb.Filter} Filter Policy java instance.
* @return the reference to the current config.
*/
+ public BlockBasedTableConfig setFilterPolicy(
+ final Filter filterPolicy) {
+ this.filterPolicy = filterPolicy;
+ return this;
+ }
+
+ /**
+ * Set the filter.
+ *
+ * @param filter {@link org.rocksdb.Filter} Filter Policy java instance.
+ * @return the reference to the current config.
+ *
+ * @deprecated Use {@link #setFilterPolicy(Filter)}
+ */
+ @Deprecated
public BlockBasedTableConfig setFilter(
final Filter filter) {
- filter_ = filter;
- return this;
+ return setFilterPolicy(filter);
}
/**
- * Indicating if we'd put index/filter blocks to the block cache.
- If not specified, each "table reader" object will pre-load index/filter
- block during table initialization.
+ * Determine if whole keys as opposed to prefixes are placed in the filter.
*
- * @return if index and filter blocks should be put in block cache.
+ * @return if whole key filtering is enabled
*/
- public boolean cacheIndexAndFilterBlocks() {
- return cacheIndexAndFilterBlocks_;
+ public boolean wholeKeyFiltering() {
+ return wholeKeyFiltering;
}
/**
- * Indicating if we'd put index/filter blocks to the block cache.
- If not specified, each "table reader" object will pre-load index/filter
- block during table initialization.
+ * If true, place whole keys in the filter (not just prefixes).
+ * This must generally be true for gets to be efficient.
+ * Default: true
*
- * @param cacheIndexAndFilterBlocks and filter blocks should be put in block cache.
+ * @param wholeKeyFiltering if enable whole key filtering
* @return the reference to the current config.
*/
- public BlockBasedTableConfig setCacheIndexAndFilterBlocks(
- final boolean cacheIndexAndFilterBlocks) {
- cacheIndexAndFilterBlocks_ = cacheIndexAndFilterBlocks;
+ public BlockBasedTableConfig setWholeKeyFiltering(
+ final boolean wholeKeyFiltering) {
+ this.wholeKeyFiltering = wholeKeyFiltering;
return this;
}
/**
- * Indicates if index and filter blocks will be treated as high-priority in the block cache.
- * See note below about applicability. If not specified, defaults to false.
+ * Returns true when compression verification is enabled.
*
- * @return if index and filter blocks will be treated as high-priority.
+ * See {@link #setVerifyCompression(boolean)}.
+ *
+ * @return true if compression verification is enabled.
*/
- public boolean cacheIndexAndFilterBlocksWithHighPriority() {
- return cacheIndexAndFilterBlocksWithHighPriority_;
+ public boolean verifyCompression() {
+ return verifyCompression;
}
/**
- * If true, cache index and filter blocks with high priority. If set to true,
- * depending on implementation of block cache, index and filter blocks may be
- * less likely to be evicted than data blocks.
+ * Verify that decompressing the compressed block gives back the input. This
+ * is a verification mode that we use to detect bugs in compression
+ * algorithms.
+ *
+ * @param verifyCompression true to enable compression verification.
*
- * @param cacheIndexAndFilterBlocksWithHighPriority if index and filter blocks
- * will be treated as high-priority.
* @return the reference to the current config.
*/
- public BlockBasedTableConfig setCacheIndexAndFilterBlocksWithHighPriority(
- final boolean cacheIndexAndFilterBlocksWithHighPriority) {
- cacheIndexAndFilterBlocksWithHighPriority_ = cacheIndexAndFilterBlocksWithHighPriority;
+ public BlockBasedTableConfig setVerifyCompression(
+ final boolean verifyCompression) {
+ this.verifyCompression = verifyCompression;
return this;
}
/**
- * Indicating if we'd like to pin L0 index/filter blocks to the block cache.
- If not specified, defaults to false.
+ * Get the Read amplification bytes per-bit.
*
- * @return if L0 index and filter blocks should be pinned to the block cache.
+ * See {@link #setReadAmpBytesPerBit(int)}.
+ *
+ * @return the bytes per-bit.
*/
- public boolean pinL0FilterAndIndexBlocksInCache() {
- return pinL0FilterAndIndexBlocksInCache_;
+ public int readAmpBytesPerBit() {
+ return readAmpBytesPerBit;
}
/**
- * Indicating if we'd like to pin L0 index/filter blocks to the block cache.
- If not specified, defaults to false.
+ * Set the Read amplification bytes per-bit.
+ *
+ * If used, For every data block we load into memory, we will create a bitmap
+ * of size ((block_size / `read_amp_bytes_per_bit`) / 8) bytes. This bitmap
+ * will be used to figure out the percentage we actually read of the blocks.
+ *
+ * When this feature is used Tickers::READ_AMP_ESTIMATE_USEFUL_BYTES and
+ * Tickers::READ_AMP_TOTAL_READ_BYTES can be used to calculate the
+ * read amplification using this formula
+ * (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES)
+ *
+ * value => memory usage (percentage of loaded blocks memory)
+ * 1 => 12.50 %
+ * 2 => 06.25 %
+ * 4 => 03.12 %
+ * 8 => 01.56 %
+ * 16 => 00.78 %
+ *
+ * Note: This number must be a power of 2, if not it will be sanitized
+ * to be the next lowest power of 2, for example a value of 7 will be
+ * treated as 4, a value of 19 will be treated as 16.
+ *
+ * Default: 0 (disabled)
+ *
+ * @param readAmpBytesPerBit the bytes per-bit
*
- * @param pinL0FilterAndIndexBlocksInCache pin blocks in block cache
* @return the reference to the current config.
*/
- public BlockBasedTableConfig setPinL0FilterAndIndexBlocksInCache(
- final boolean pinL0FilterAndIndexBlocksInCache) {
- pinL0FilterAndIndexBlocksInCache_ = pinL0FilterAndIndexBlocksInCache;
+ public BlockBasedTableConfig setReadAmpBytesPerBit(final int readAmpBytesPerBit) {
+ this.readAmpBytesPerBit = readAmpBytesPerBit;
return this;
}
/**
- * Indicating if we're using partitioned filters. Defaults to false.
+ * Get the format version.
+ * See {@link #setFormatVersion(int)}.
*
- * @return if we're using partition filters.
+ * @return the currently configured format version.
*/
- public boolean partitionFilters() {
- return partitionFilters_;
+ public int formatVersion() {
+ return formatVersion;
}
/**
- * Use partitioned full filters for each SST file. This option is incompatible with
- * block-based filters.
+ * <p>We currently have five versions:</p>
*
- * @param partitionFilters use partition filters.
- * @return the reference to the current config.
+ * <ul>
+ * <li><strong>0</strong> - This version is currently written
+ * out by all RocksDB's versions by default. Can be read by really old
+ * RocksDB's. Doesn't support changing checksum (default is CRC32).</li>
+ * <li><strong>1</strong> - Can be read by RocksDB's versions since 3.0.
+ * Supports non-default checksum, like xxHash. It is written by RocksDB when
+ * BlockBasedTableOptions::checksum is something other than kCRC32c. (version
+ * 0 is silently upconverted)</li>
+ * <li><strong>2</strong> - Can be read by RocksDB's versions since 3.10.
+ * Changes the way we encode compressed blocks with LZ4, BZip2 and Zlib
+ * compression. If you don't plan to run RocksDB before version 3.10,
+ * you should probably use this.</li>
+ * <li><strong>3</strong> - Can be read by RocksDB's versions since 5.15. Changes the way we
+ * encode the keys in index blocks. If you don't plan to run RocksDB before
+ * version 5.15, you should probably use this.
+ * This option only affects newly written tables. When reading existing
+ * tables, the information about version is read from the footer.</li>
+ * <li><strong>4</strong> - Can be read by RocksDB's versions since 5.16. Changes the way we
+ * encode the values in index blocks. If you don't plan to run RocksDB before
+ * version 5.16 and you are using index_block_restart_interval > 1, you should
+ * probably use this as it would reduce the index size.</li>
+ * </ul>
+ * <p> This option only affects newly written tables. When reading existing
+ * tables, the information about version is read from the footer.</p>
+ *
+ * @param formatVersion integer representing the version to be used.
+ *
+ * @return the reference to the current option.
*/
- public BlockBasedTableConfig setPartitionFilters(final boolean partitionFilters) {
- partitionFilters_ = partitionFilters;
+ public BlockBasedTableConfig setFormatVersion(
+ final int formatVersion) {
+ assert(formatVersion >= 0 && formatVersion <= 4);
+ this.formatVersion = formatVersion;
return this;
}
/**
- * @return block size for partitioned metadata.
+ * Determine if index compression is enabled.
+ *
+ * See {@link #setEnableIndexCompression(boolean)}.
+ *
+ * @return true if index compression is enabled, false otherwise
*/
- public long metadataBlockSize() {
- return metadataBlockSize_;
+ public boolean enableIndexCompression() {
+ return enableIndexCompression;
}
/**
- * Set block size for partitioned metadata.
+ * Store index blocks on disk in compressed format.
*
- * @param metadataBlockSize Partitioned metadata block size.
- * @return the reference to the current config.
+ * Changing this option to false will avoid the overhead of decompression
+ * if index blocks are evicted and read back.
+ *
+ * @param enableIndexCompression true to enable index compression,
+ * false to disable
+ *
+ * @return the reference to the current option.
*/
- public BlockBasedTableConfig setMetadataBlockSize(
- final long metadataBlockSize) {
- metadataBlockSize_ = metadataBlockSize;
+ public BlockBasedTableConfig setEnableIndexCompression(
+ final boolean enableIndexCompression) {
+ this.enableIndexCompression = enableIndexCompression;
return this;
}
/**
- * Indicates if top-level index and filter blocks should be pinned.
+ * Determines whether data blocks are aligned on the lesser of page size
+ * and block size.
*
- * @return if top-level index and filter blocks should be pinned.
+ * @return true if data blocks are aligned on the lesser of page size
+ * and block size.
*/
- public boolean pinTopLevelIndexAndFilter() {
- return pinTopLevelIndexAndFilter_;
+ public boolean blockAlign() {
+ return blockAlign;
}
/**
- * If cacheIndexAndFilterBlocks is true and the below is true, then
- * the top-level index of partitioned filter and index blocks are stored in
- * the cache, but a reference is held in the "table reader" object so the
- * blocks are pinned and only evicted from cache when the table reader is
- * freed. This is not limited to l0 in LSM tree.
+ * Set whether data blocks should be aligned on the lesser of page size
+ * and block size.
*
- * @param pinTopLevelIndexAndFilter if top-level index and filter blocks should be pinned.
- * @return the reference to the current config.
+ * @param blockAlign true to align data blocks on the lesser of page size
+ * and block size.
+ *
+ * @return the reference to the current option.
*/
- public BlockBasedTableConfig setPinTopLevelIndexAndFilter(final boolean pinTopLevelIndexAndFilter) {
- pinTopLevelIndexAndFilter_ = pinTopLevelIndexAndFilter;
+ public BlockBasedTableConfig setBlockAlign(final boolean blockAlign) {
+ this.blockAlign = blockAlign;
return this;
}
+
/**
- * Influence the behavior when kHashSearch is used.
- if false, stores a precise prefix to block range mapping
- if true, does not store prefix and allows prefix hash collision
- (less memory consumption)
+ * Get the size of the cache in bytes that will be used by RocksDB.
*
- * @return if hash collisions should be allowed.
+ * @return block cache size in bytes
*/
- public boolean hashIndexAllowCollision() {
- return hashIndexAllowCollision_;
+ @Deprecated
+ public long blockCacheSize() {
+ return blockCacheSize;
}
/**
- * Influence the behavior when kHashSearch is used.
- if false, stores a precise prefix to block range mapping
- if true, does not store prefix and allows prefix hash collision
- (less memory consumption)
+ * Set the size of the cache in bytes that will be used by RocksDB.
+ * If cacheSize is non-positive, then cache will not be used.
+ * DEFAULT: 8M
*
- * @param hashIndexAllowCollision points out if hash collisions should be allowed.
+ * @param blockCacheSize block cache size in bytes
* @return the reference to the current config.
+ *
+ * @deprecated Use {@link #setBlockCache(Cache)}.
*/
- public BlockBasedTableConfig setHashIndexAllowCollision(
- final boolean hashIndexAllowCollision) {
- hashIndexAllowCollision_ = hashIndexAllowCollision;
+ @Deprecated
+ public BlockBasedTableConfig setBlockCacheSize(final long blockCacheSize) {
+ this.blockCacheSize = blockCacheSize;
+ return this;
+ }
+
+ /**
+ * Returns the number of shard bits used in the block cache.
+ * The resulting number of shards would be 2 ^ (returned value).
+ * Any negative number means use default settings.
+ *
+ * @return the number of shard bits used in the block cache.
+ */
+ @Deprecated
+ public int cacheNumShardBits() {
+ return blockCacheNumShardBits;
+ }
+
+ /**
+ * Controls the number of shards for the block cache.
+ * This is applied only if cacheSize is set to non-negative.
+ *
+ * @param blockCacheNumShardBits the number of shard bits. The resulting
+ * number of shards would be 2 ^ numShardBits. Any negative
+ * number means use default settings.
+ * @return the reference to the current option.
+ *
+ * @deprecated Use {@link #setBlockCache(Cache)}.
+ */
+ @Deprecated
+ public BlockBasedTableConfig setCacheNumShardBits(
+ final int blockCacheNumShardBits) {
+ this.blockCacheNumShardBits = blockCacheNumShardBits;
return this;
}
*
* @return size of compressed block cache.
*/
+ @Deprecated
public long blockCacheCompressedSize() {
- return blockCacheCompressedSize_;
+ return blockCacheCompressedSize;
}
/**
*
* @param blockCacheCompressedSize of compressed block cache.
* @return the reference to the current config.
+ *
+ * @deprecated Use {@link #setBlockCacheCompressed(Cache)}.
*/
+ @Deprecated
public BlockBasedTableConfig setBlockCacheCompressedSize(
final long blockCacheCompressedSize) {
- blockCacheCompressedSize_ = blockCacheCompressedSize;
+ this.blockCacheCompressedSize = blockCacheCompressedSize;
return this;
}
* number of shards would be 2 ^ numShardBits. Any negative
* number means use default settings.
*/
+ @Deprecated
public int blockCacheCompressedNumShardBits() {
- return blockCacheCompressedNumShardBits_;
+ return blockCacheCompressedNumShardBits;
}
/**
* number of shards would be 2 ^ numShardBits. Any negative
* number means use default settings."
* @return the reference to the current option.
+ *
+ * @deprecated Use {@link #setBlockCacheCompressed(Cache)}.
*/
+ @Deprecated
public BlockBasedTableConfig setBlockCacheCompressedNumShardBits(
final int blockCacheCompressedNumShardBits) {
- blockCacheCompressedNumShardBits_ = blockCacheCompressedNumShardBits;
- return this;
- }
-
- /**
- * Sets the checksum type to be used with this table.
- *
- * @param checksumType {@link org.rocksdb.ChecksumType} value.
- * @return the reference to the current option.
- */
- public BlockBasedTableConfig setChecksumType(
- final ChecksumType checksumType) {
- checksumType_ = checksumType;
+ this.blockCacheCompressedNumShardBits = blockCacheCompressedNumShardBits;
return this;
}
/**
+ * Influence the behavior when kHashSearch is used.
+ * if false, stores a precise prefix to block range mapping
+ * if true, does not store prefix and allows prefix hash collision
+ * (less memory consumption)
*
- * @return the currently set checksum type
- */
- public ChecksumType checksumType() {
- return checksumType_;
- }
-
- /**
- * Sets the index type to used with this table.
+ * @return if hash collisions should be allowed.
*
- * @param indexType {@link org.rocksdb.IndexType} value
- * @return the reference to the current option.
+ * @deprecated This option is now deprecated. No matter what value it
+ * is set to, it will behave as
+ * if {@link #hashIndexAllowCollision()} == true.
*/
- public BlockBasedTableConfig setIndexType(
- final IndexType indexType) {
- indexType_ = indexType;
- return this;
+ @Deprecated
+ public boolean hashIndexAllowCollision() {
+ return true;
}
/**
+ * Influence the behavior when kHashSearch is used.
+ * if false, stores a precise prefix to block range mapping
+ * if true, does not store prefix and allows prefix hash collision
+ * (less memory consumption)
*
- * @return the currently set index type
- */
- public IndexType indexType() {
- return indexType_;
- }
-
- /**
- * <p>We currently have three versions:</p>
+ * @param hashIndexAllowCollision points out if hash collisions should be allowed.
*
- * <ul>
- * <li><strong>0</strong> - This version is currently written
- * out by all RocksDB's versions by default. Can be read by really old
- * RocksDB's. Doesn't support changing checksum (default is CRC32).</li>
- * <li><strong>1</strong> - Can be read by RocksDB's versions since 3.0.
- * Supports non-default checksum, like xxHash. It is written by RocksDB when
- * BlockBasedTableOptions::checksum is something other than kCRC32c. (version
- * 0 is silently upconverted)</li>
- * <li><strong>2</strong> - Can be read by RocksDB's versions since 3.10.
- * Changes the way we encode compressed blocks with LZ4, BZip2 and Zlib
- * compression. If you don't plan to run RocksDB before version 3.10,
- * you should probably use this.</li>
- * </ul>
- * <p> This option only affects newly written tables. When reading existing
- * tables, the information about version is read from the footer.</p>
+ * @return the reference to the current config.
*
- * @param formatVersion integer representing the version to be used.
- * @return the reference to the current option.
+ * @deprecated This option is now deprecated. No matter what value it
+ * is set to, it will behave as
+ * if {@link #hashIndexAllowCollision()} == true.
*/
- public BlockBasedTableConfig setFormatVersion(
- final int formatVersion) {
- assert(formatVersion >= 0 && formatVersion <= 2);
- formatVersion_ = formatVersion;
+ @Deprecated
+ public BlockBasedTableConfig setHashIndexAllowCollision(
+ final boolean hashIndexAllowCollision) {
+ // no-op
return this;
}
- /**
- *
- * @return the currently configured format version.
- * See also: {@link #setFormatVersion(int)}.
- */
- public int formatVersion() {
- return formatVersion_;
- }
-
+ @Override protected long newTableFactoryHandle() {
+ final long filterPolicyHandle;
+ if (filterPolicy != null) {
+ filterPolicyHandle = filterPolicy.nativeHandle_;
+ } else {
+ filterPolicyHandle = 0;
+ }
+ final long blockCacheHandle;
+ if (blockCache != null) {
+ blockCacheHandle = blockCache.nativeHandle_;
+ } else {
+ blockCacheHandle = 0;
+ }
- @Override protected long newTableFactoryHandle() {
- long filterHandle = 0;
- if (filter_ != null) {
- filterHandle = filter_.nativeHandle_;
+ final long persistentCacheHandle;
+ if (persistentCache != null) {
+ persistentCacheHandle = persistentCache.nativeHandle_;
+ } else {
+ persistentCacheHandle = 0;
}
- long blockCacheHandle = 0;
- if (blockCache_ != null) {
- blockCacheHandle = blockCache_.nativeHandle_;
+ final long blockCacheCompressedHandle;
+ if (blockCacheCompressed != null) {
+ blockCacheCompressedHandle = blockCacheCompressed.nativeHandle_;
+ } else {
+ blockCacheCompressedHandle = 0;
}
- return newTableFactoryHandle(noBlockCache_, blockCacheSize_, blockCacheNumShardBits_,
- blockCacheHandle, blockSize_, blockSizeDeviation_, blockRestartInterval_,
- wholeKeyFiltering_, filterHandle, cacheIndexAndFilterBlocks_,
- cacheIndexAndFilterBlocksWithHighPriority_, pinL0FilterAndIndexBlocksInCache_,
- partitionFilters_, metadataBlockSize_, pinTopLevelIndexAndFilter_,
- hashIndexAllowCollision_, blockCacheCompressedSize_, blockCacheCompressedNumShardBits_,
- checksumType_.getValue(), indexType_.getValue(), formatVersion_);
- }
-
- private native long newTableFactoryHandle(boolean noBlockCache, long blockCacheSize,
- int blockCacheNumShardBits, long blockCacheHandle, long blockSize, int blockSizeDeviation,
- int blockRestartInterval, boolean wholeKeyFiltering, long filterPolicyHandle,
- boolean cacheIndexAndFilterBlocks, boolean cacheIndexAndFilterBlocksWithHighPriority,
- boolean pinL0FilterAndIndexBlocksInCache, boolean partitionFilters, long metadataBlockSize,
- boolean pinTopLevelIndexAndFilter, boolean hashIndexAllowCollision,
- long blockCacheCompressedSize, int blockCacheCompressedNumShardBits,
- byte checkSumType, byte indexType, int formatVersion);
-
- private boolean cacheIndexAndFilterBlocks_;
- private boolean cacheIndexAndFilterBlocksWithHighPriority_;
- private boolean pinL0FilterAndIndexBlocksInCache_;
- private boolean partitionFilters_;
- private long metadataBlockSize_;
- private boolean pinTopLevelIndexAndFilter_;
- private IndexType indexType_;
- private boolean hashIndexAllowCollision_;
- private ChecksumType checksumType_;
- private boolean noBlockCache_;
- private long blockSize_;
- private long blockCacheSize_;
- private int blockCacheNumShardBits_;
- private Cache blockCache_;
- private long blockCacheCompressedSize_;
- private int blockCacheCompressedNumShardBits_;
- private int blockSizeDeviation_;
- private int blockRestartInterval_;
- private Filter filter_;
- private boolean wholeKeyFiltering_;
- private int formatVersion_;
+ return newTableFactoryHandle(cacheIndexAndFilterBlocks,
+ cacheIndexAndFilterBlocksWithHighPriority,
+ pinL0FilterAndIndexBlocksInCache, pinTopLevelIndexAndFilter,
+ indexType.getValue(), dataBlockIndexType.getValue(),
+ dataBlockHashTableUtilRatio, checksumType.getValue(), noBlockCache,
+ blockCacheHandle, persistentCacheHandle, blockCacheCompressedHandle,
+ blockSize, blockSizeDeviation, blockRestartInterval,
+ indexBlockRestartInterval, metadataBlockSize, partitionFilters,
+ useDeltaEncoding, filterPolicyHandle, wholeKeyFiltering,
+ verifyCompression, readAmpBytesPerBit, formatVersion,
+ enableIndexCompression, blockAlign,
+ blockCacheSize, blockCacheNumShardBits,
+ blockCacheCompressedSize, blockCacheCompressedNumShardBits);
+ }
+
+ private native long newTableFactoryHandle(
+ final boolean cacheIndexAndFilterBlocks,
+ final boolean cacheIndexAndFilterBlocksWithHighPriority,
+ final boolean pinL0FilterAndIndexBlocksInCache,
+ final boolean pinTopLevelIndexAndFilter,
+ final byte indexTypeValue,
+ final byte dataBlockIndexTypeValue,
+ final double dataBlockHashTableUtilRatio,
+ final byte checksumTypeValue,
+ final boolean noBlockCache,
+ final long blockCacheHandle,
+ final long persistentCacheHandle,
+ final long blockCacheCompressedHandle,
+ final long blockSize,
+ final int blockSizeDeviation,
+ final int blockRestartInterval,
+ final int indexBlockRestartInterval,
+ final long metadataBlockSize,
+ final boolean partitionFilters,
+ final boolean useDeltaEncoding,
+ final long filterPolicyHandle,
+ final boolean wholeKeyFiltering,
+ final boolean verifyCompression,
+ final int readAmpBytesPerBit,
+ final int formatVersion,
+ final boolean enableIndexCompression,
+ final boolean blockAlign,
+
+ @Deprecated final long blockCacheSize,
+ @Deprecated final int blockCacheNumShardBits,
+
+ @Deprecated final long blockCacheCompressedSize,
+ @Deprecated final int blockCacheCompressedNumShardBits
+ );
+
+ //TODO(AR) flushBlockPolicyFactory
+ private boolean cacheIndexAndFilterBlocks;
+ private boolean cacheIndexAndFilterBlocksWithHighPriority;
+ private boolean pinL0FilterAndIndexBlocksInCache;
+ private boolean pinTopLevelIndexAndFilter;
+ private IndexType indexType;
+ private DataBlockIndexType dataBlockIndexType;
+ private double dataBlockHashTableUtilRatio;
+ private ChecksumType checksumType;
+ private boolean noBlockCache;
+ private Cache blockCache;
+ private PersistentCache persistentCache;
+ private Cache blockCacheCompressed;
+ private long blockSize;
+ private int blockSizeDeviation;
+ private int blockRestartInterval;
+ private int indexBlockRestartInterval;
+ private long metadataBlockSize;
+ private boolean partitionFilters;
+ private boolean useDeltaEncoding;
+ private Filter filterPolicy;
+ private boolean wholeKeyFiltering;
+ private boolean verifyCompression;
+ private int readAmpBytesPerBit;
+ private int formatVersion;
+ private boolean enableIndexCompression;
+ private boolean blockAlign;
+
+ // NOTE: ONLY used if blockCache == null
+ @Deprecated private long blockCacheSize;
+ @Deprecated private int blockCacheNumShardBits;
+
+ // NOTE: ONLY used if blockCacheCompressed == null
+ @Deprecated private long blockCacheCompressedSize;
+ @Deprecated private int blockCacheCompressedNumShardBits;
}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * The metadata that describes a column family.
+ */
+public class ColumnFamilyMetaData {
+ private final long size;
+ private final long fileCount;
+ private final byte[] name;
+ private final LevelMetaData[] levels;
+
+ /**
+ * Called from JNI C++
+ */
+ private ColumnFamilyMetaData(
+ final long size,
+ final long fileCount,
+ final byte[] name,
+ final LevelMetaData[] levels) {
+ this.size = size;
+ this.fileCount = fileCount;
+ this.name = name;
+ this.levels = levels;
+ }
+
+ /**
+ * The size of this column family in bytes, which is equal to the sum of
+ * the file size of its {@link #levels()}.
+ *
+ * @return the size of this column family
+ */
+ public long size() {
+ return size;
+ }
+
+ /**
+ * The number of files in this column family.
+ *
+ * @return the number of files
+ */
+ public long fileCount() {
+ return fileCount;
+ }
+
+ /**
+ * The name of the column family.
+ *
+ * @return the name
+ */
+ public byte[] name() {
+ return name;
+ }
+
+ /**
+ * The metadata of all levels in this column family.
+ *
+ * @return the levels metadata
+ */
+ public List<LevelMetaData> levels() {
+ return Arrays.asList(levels);
+ }
+}
this.compactionFilterFactory_ = other.compactionFilterFactory_;
this.compactionOptionsUniversal_ = other.compactionOptionsUniversal_;
this.compactionOptionsFIFO_ = other.compactionOptionsFIFO_;
+ this.bottommostCompressionOptions_ = other.bottommostCompressionOptions_;
this.compressionOptions_ = other.compressionOptions_;
}
+ /**
+ * Constructor from Options
+ *
+ * @param options The options.
+ */
+ public ColumnFamilyOptions(final Options options) {
+ super(newColumnFamilyOptionsFromOptions(options.nativeHandle_));
+ }
+
/**
* <p>Constructor to be used by
* {@link #getColumnFamilyOptionsFromProps(java.util.Properties)},
bottommostCompressionType(nativeHandle_));
}
+ @Override
+ public ColumnFamilyOptions setBottommostCompressionOptions(
+ final CompressionOptions bottommostCompressionOptions) {
+ setBottommostCompressionOptions(nativeHandle_,
+ bottommostCompressionOptions.nativeHandle_);
+ this.bottommostCompressionOptions_ = bottommostCompressionOptions;
+ return this;
+ }
+
+ @Override
+ public CompressionOptions bottommostCompressionOptions() {
+ return this.bottommostCompressionOptions_;
+ }
+
@Override
public ColumnFamilyOptions setCompressionOptions(
final CompressionOptions compressionOptions) {
@Override
public CompactionStyle compactionStyle() {
- return CompactionStyle.values()[compactionStyle(nativeHandle_)];
+ return CompactionStyle.fromValue(compactionStyle(nativeHandle_));
}
@Override
return reportBgIoStats(nativeHandle_);
}
+ @Override
+ public ColumnFamilyOptions setTtl(final long ttl) {
+ setTtl(nativeHandle_, ttl);
+ return this;
+ }
+
+ @Override
+ public long ttl() {
+ return ttl(nativeHandle_);
+ }
+
@Override
public ColumnFamilyOptions setCompactionOptionsUniversal(
final CompactionOptionsUniversal compactionOptionsUniversal) {
String optString);
private static native long newColumnFamilyOptions();
- private static native long copyColumnFamilyOptions(long handle);
+ private static native long copyColumnFamilyOptions(final long handle);
+ private static native long newColumnFamilyOptionsFromOptions(
+ final long optionsHandle);
@Override protected final native void disposeInternal(final long handle);
private native void optimizeForSmallDb(final long handle);
private native void setBottommostCompressionType(long handle,
byte bottommostCompressionType);
private native byte bottommostCompressionType(long handle);
+ private native void setBottommostCompressionOptions(final long handle,
+ final long bottommostCompressionOptionsHandle);
private native void setCompressionOptions(long handle,
long compressionOptionsHandle);
private native void useFixedLengthPrefixExtractor(
private native void setReportBgIoStats(final long handle,
final boolean reportBgIoStats);
private native boolean reportBgIoStats(final long handle);
+ private native void setTtl(final long handle, final long ttl);
+ private native long ttl(final long handle);
private native void setCompactionOptionsUniversal(final long handle,
final long compactionOptionsUniversalHandle);
private native void setCompactionOptionsFIFO(final long handle,
compactionFilterFactory_;
private CompactionOptionsUniversal compactionOptionsUniversal_;
private CompactionOptionsFIFO compactionOptionsFIFO_;
+ private CompressionOptions bottommostCompressionOptions_;
private CompressionOptions compressionOptions_;
}
*/
CompressionType bottommostCompressionType();
+ /**
+ * Set the options for compression algorithms used by
+ * {@link #bottommostCompressionType()} if it is enabled.
+ *
+ * To enable it, please see the definition of
+ * {@link CompressionOptions}.
+ *
+ * @param compressionOptions the bottom most compression options.
+ *
+ * @return the reference of the current options.
+ */
+ T setBottommostCompressionOptions(
+ final CompressionOptions compressionOptions);
+
+ /**
+ * Get the bottom most compression options.
+ *
+ * See {@link #setBottommostCompressionOptions(CompressionOptions)}.
+ *
+ * @return the bottom most compression options.
+ */
+ CompressionOptions bottommostCompressionOptions();
/**
* Set the different options for compression algorithms
return this;
}
-
- /**
- * Returns the policy for compacting the bottommost level
- * @return The BottommostLevelCompaction policy
- */
- public BottommostLevelCompaction bottommostLevelCompaction() {
- return BottommostLevelCompaction.fromRocksId(bottommostLevelCompaction(nativeHandle_));
- }
-
- /**
- * Sets the policy for compacting the bottommost level
- *
- * @param bottommostLevelCompaction The policy for compacting the bottommost level
- * @return This CompactRangeOptions
- */
- public CompactRangeOptions setBottommostLevelCompaction(final BottommostLevelCompaction bottommostLevelCompaction) {
- setBottommostLevelCompaction(nativeHandle_, bottommostLevelCompaction.getValue());
- return this;
- }
-
/**
* Returns whether compacted files will be moved to the minimum level capable of holding the data or given level
* (specified non-negative target_level).
return this;
}
+ /**
+ * Returns the policy for compacting the bottommost level
+ * @return The BottommostLevelCompaction policy
+ */
+ public BottommostLevelCompaction bottommostLevelCompaction() {
+ return BottommostLevelCompaction.fromRocksId(bottommostLevelCompaction(nativeHandle_));
+ }
+
+ /**
+ * Sets the policy for compacting the bottommost level
+ *
+ * @param bottommostLevelCompaction The policy for compacting the bottommost level
+ * @return This CompactRangeOptions
+ */
+ public CompactRangeOptions setBottommostLevelCompaction(final BottommostLevelCompaction bottommostLevelCompaction) {
+ setBottommostLevelCompaction(nativeHandle_, bottommostLevelCompaction.getValue());
+ return this;
+ }
+
/**
* If true, compaction will execute immediately even if doing so would cause the DB to
* enter write stall mode. Otherwise, it'll sleep until load is low enough.
}
private native static long newCompactRangeOptions();
+ @Override protected final native void disposeInternal(final long handle);
+
private native boolean exclusiveManualCompaction(final long handle);
- private native void setExclusiveManualCompaction(final long handle, final boolean exclusive_manual_compaction);
- private native int bottommostLevelCompaction(final long handle);
- private native void setBottommostLevelCompaction(final long handle, final int bottommostLevelCompaction);
+ private native void setExclusiveManualCompaction(final long handle,
+ final boolean exclusive_manual_compaction);
private native boolean changeLevel(final long handle);
- private native void setChangeLevel(final long handle, final boolean changeLevel);
+ private native void setChangeLevel(final long handle,
+ final boolean changeLevel);
private native int targetLevel(final long handle);
- private native void setTargetLevel(final long handle, final int targetLevel);
+ private native void setTargetLevel(final long handle,
+ final int targetLevel);
private native int targetPathId(final long handle);
- private native void setTargetPathId(final long handle, final int /* uint32_t */ targetPathId);
+ private native void setTargetPathId(final long handle,
+ final int targetPathId);
+ private native int bottommostLevelCompaction(final long handle);
+ private native void setBottommostLevelCompaction(final long handle,
+ final int bottommostLevelCompaction);
private native boolean allowWriteStall(final long handle);
- private native void setAllowWriteStall(final long handle, final boolean allowWriteStall);
- private native void setMaxSubcompactions(final long handle, final int /* uint32_t */ maxSubcompactions);
+ private native void setAllowWriteStall(final long handle,
+ final boolean allowWriteStall);
+ private native void setMaxSubcompactions(final long handle,
+ final int maxSubcompactions);
private native int maxSubcompactions(final long handle);
-
- @Override
- protected final native void disposeInternal(final long handle);
-
}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+public class CompactionJobInfo extends RocksObject {
+
+ public CompactionJobInfo() {
+ super(newCompactionJobInfo());
+ }
+
+ /**
+ * Private as called from JNI C++
+ */
+ private CompactionJobInfo(final long nativeHandle) {
+ super(nativeHandle);
+ }
+
+ /**
+ * Get the name of the column family where the compaction happened.
+ *
+ * @return the name of the column family
+ */
+ public byte[] columnFamilyName() {
+ return columnFamilyName(nativeHandle_);
+ }
+
+ /**
+ * Get the status indicating whether the compaction was successful or not.
+ *
+ * @return the status
+ */
+ public Status status() {
+ return status(nativeHandle_);
+ }
+
+ /**
+ * Get the id of the thread that completed this compaction job.
+ *
+ * @return the id of the thread
+ */
+ public long threadId() {
+ return threadId(nativeHandle_);
+ }
+
+ /**
+ * Get the job id, which is unique in the same thread.
+ *
+ * @return the id of the job
+ */
+ public int jobId() {
+ return jobId(nativeHandle_);
+ }
+
+ /**
+ * Get the smallest input level of the compaction.
+ *
+ * @return the input level
+ */
+ public int baseInputLevel() {
+ return baseInputLevel(nativeHandle_);
+ }
+
+ /**
+ * Get the output level of the compaction.
+ *
+ * @return the output level
+ */
+ public int outputLevel() {
+ return outputLevel(nativeHandle_);
+ }
+
+ /**
+ * Get the names of the compaction input files.
+ *
+ * @return the names of the input files.
+ */
+ public List<String> inputFiles() {
+ return Arrays.asList(inputFiles(nativeHandle_));
+ }
+
+ /**
+ * Get the names of the compaction output files.
+ *
+ * @return the names of the output files.
+ */
+ public List<String> outputFiles() {
+ return Arrays.asList(outputFiles(nativeHandle_));
+ }
+
+ /**
+ * Get the table properties for the input and output tables.
+ *
+ * The map is keyed by values from {@link #inputFiles()} and
+ * {@link #outputFiles()}.
+ *
+ * @return the table properties
+ */
+ public Map<String, TableProperties> tableProperties() {
+ return tableProperties(nativeHandle_);
+ }
+
+ /**
+ * Get the Reason for running the compaction.
+ *
+ * @return the reason.
+ */
+ public CompactionReason compactionReason() {
+ return CompactionReason.fromValue(compactionReason(nativeHandle_));
+ }
+
+ /**
+ * Get the compression algorithm used for output files.
+ *
+ * @return the compression algorithm
+ */
+ public CompressionType compression() {
+ return CompressionType.getCompressionType(compression(nativeHandle_));
+ }
+
+ /**
+ * Get detailed information about this compaction.
+ *
+ * @return the detailed information, or null if not available.
+ */
+ public /* @Nullable */ CompactionJobStats stats() {
+ final long statsHandle = stats(nativeHandle_);
+ if (statsHandle == 0) {
+ return null;
+ }
+
+ return new CompactionJobStats(statsHandle);
+ }
+
+
+ private static native long newCompactionJobInfo();
+ @Override protected native void disposeInternal(final long handle);
+
+ private static native byte[] columnFamilyName(final long handle);
+ private static native Status status(final long handle);
+ private static native long threadId(final long handle);
+ private static native int jobId(final long handle);
+ private static native int baseInputLevel(final long handle);
+ private static native int outputLevel(final long handle);
+ private static native String[] inputFiles(final long handle);
+ private static native String[] outputFiles(final long handle);
+ private static native Map<String, TableProperties> tableProperties(
+ final long handle);
+ private static native byte compactionReason(final long handle);
+ private static native byte compression(final long handle);
+ private static native long stats(final long handle);
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public class CompactionJobStats extends RocksObject {
+
+ public CompactionJobStats() {
+ super(newCompactionJobStats());
+ }
+
+ /**
+ * Private as called from JNI C++
+ */
+ CompactionJobStats(final long nativeHandle) {
+ super(nativeHandle);
+ }
+
+ /**
+ * Reset the stats.
+ */
+ public void reset() {
+ reset(nativeHandle_);
+ }
+
+ /**
+ * Aggregate the CompactionJobStats from another instance with this one.
+ *
+ * @param compactionJobStats another instance of stats.
+ */
+ public void add(final CompactionJobStats compactionJobStats) {
+ add(nativeHandle_, compactionJobStats.nativeHandle_);
+ }
+
+ /**
+ * Get the elapsed time of this compaction in microseconds.
+ *
+ * @return the elapsed time of this compaction in microseconds.
+ */
+ public long elapsedMicros() {
+ return elapsedMicros(nativeHandle_);
+ }
+
+ /**
+ * Get the number of compaction input records.
+ *
+ * @return the number of compaction input records.
+ */
+ public long numInputRecords() {
+ return numInputRecords(nativeHandle_);
+ }
+
+ /**
+ * Get the number of compaction input files.
+ *
+ * @return the number of compaction input files.
+ */
+ public long numInputFiles() {
+ return numInputFiles(nativeHandle_);
+ }
+
+ /**
+ * Get the number of compaction input files at the output level.
+ *
+ * @return the number of compaction input files at the output level.
+ */
+ public long numInputFilesAtOutputLevel() {
+ return numInputFilesAtOutputLevel(nativeHandle_);
+ }
+
+ /**
+ * Get the number of compaction output records.
+ *
+ * @return the number of compaction output records.
+ */
+ public long numOutputRecords() {
+ return numOutputRecords(nativeHandle_);
+ }
+
+ /**
+ * Get the number of compaction output files.
+ *
+ * @return the number of compaction output files.
+ */
+ public long numOutputFiles() {
+ return numOutputFiles(nativeHandle_);
+ }
+
+ /**
+ * Determine if the compaction is a manual compaction.
+ *
+ * @return true if the compaction is a manual compaction, false otherwise.
+ */
+ public boolean isManualCompaction() {
+ return isManualCompaction(nativeHandle_);
+ }
+
+ /**
+ * Get the size of the compaction input in bytes.
+ *
+ * @return the size of the compaction input in bytes.
+ */
+ public long totalInputBytes() {
+ return totalInputBytes(nativeHandle_);
+ }
+
+ /**
+ * Get the size of the compaction output in bytes.
+ *
+ * @return the size of the compaction output in bytes.
+ */
+ public long totalOutputBytes() {
+ return totalOutputBytes(nativeHandle_);
+ }
+
+ /**
+ * Get the number of records being replaced by newer record associated
+ * with same key.
+ *
+ * This could be a new value or a deletion entry for that key so this field
+ * sums up all updated and deleted keys.
+ *
+ * @return the number of records being replaced by newer record associated
+ * with same key.
+ */
+ public long numRecordsReplaced() {
+ return numRecordsReplaced(nativeHandle_);
+ }
+
+ /**
+ * Get the sum of the uncompressed input keys in bytes.
+ *
+ * @return the sum of the uncompressed input keys in bytes.
+ */
+ public long totalInputRawKeyBytes() {
+ return totalInputRawKeyBytes(nativeHandle_);
+ }
+
+ /**
+ * Get the sum of the uncompressed input values in bytes.
+ *
+ * @return the sum of the uncompressed input values in bytes.
+ */
+ public long totalInputRawValueBytes() {
+ return totalInputRawValueBytes(nativeHandle_);
+ }
+
+ /**
+ * Get the number of deletion entries before compaction.
+ *
+ * Deletion entries can disappear after compaction because they expired.
+ *
+ * @return the number of deletion entries before compaction.
+ */
+ public long numInputDeletionRecords() {
+ return numInputDeletionRecords(nativeHandle_);
+ }
+
+ /**
+ * Get the number of deletion records that were found obsolete and discarded
+ * because it is not possible to delete any more keys with this entry.
+ * (i.e. all possible deletions resulting from it have been completed)
+ *
+ * @return the number of deletion records that were found obsolete and
+ * discarded.
+ */
+ public long numExpiredDeletionRecords() {
+ return numExpiredDeletionRecords(nativeHandle_);
+ }
+
+ /**
+ * Get the number of corrupt keys (ParseInternalKey returned false when
+ * applied to the key) encountered and written out.
+ *
+ * @return the number of corrupt keys.
+ */
+ public long numCorruptKeys() {
+ return numCorruptKeys(nativeHandle_);
+ }
+
+ /**
+ * Get the Time spent on file's Append() call.
+ *
+ * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
+ *
+ * @return the Time spent on file's Append() call.
+ */
+ public long fileWriteNanos() {
+ return fileWriteNanos(nativeHandle_);
+ }
+
+ /**
+ * Get the Time spent on sync file range.
+ *
+ * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
+ *
+ * @return the Time spent on sync file range.
+ */
+ public long fileRangeSyncNanos() {
+ return fileRangeSyncNanos(nativeHandle_);
+ }
+
+ /**
+ * Get the Time spent on file fsync.
+ *
+ * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
+ *
+ * @return the Time spent on file fsync.
+ */
+ public long fileFsyncNanos() {
+ return fileFsyncNanos(nativeHandle_);
+ }
+
+ /**
+ * Get the time spent on preparing file writes (fallocate, etc.)
+ *
+ * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set.
+ *
+ * @return the time spent on preparing file writes (fallocate, etc.).
+ */
+ public long filePrepareWriteNanos() {
+ return filePrepareWriteNanos(nativeHandle_);
+ }
+
+ /**
+ * Get the smallest output key prefix.
+ *
+ * @return the smallest output key prefix.
+ */
+ public byte[] smallestOutputKeyPrefix() {
+ return smallestOutputKeyPrefix(nativeHandle_);
+ }
+
+ /**
+ * Get the largest output key prefix.
+ *
+ * @return the largest output key prefix.
+ */
+ public byte[] largestOutputKeyPrefix() {
+ return largestOutputKeyPrefix(nativeHandle_);
+ }
+
+ /**
+ * Get the number of single-deletes which do not meet a put.
+ *
+ * @return number of single-deletes which do not meet a put.
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public long numSingleDelFallthru() {
+ return numSingleDelFallthru(nativeHandle_);
+ }
+
+ /**
+ * Get the number of single-deletes which meet something other than a put.
+ *
+ * @return the number of single-deletes which meet something other than a put.
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public long numSingleDelMismatch() {
+ return numSingleDelMismatch(nativeHandle_);
+ }
+
+ private static native long newCompactionJobStats();
+ @Override protected native void disposeInternal(final long handle);
+
+
+ private static native void reset(final long handle);
+ private static native void add(final long handle,
+ final long compactionJobStatsHandle);
+ private static native long elapsedMicros(final long handle);
+ private static native long numInputRecords(final long handle);
+ private static native long numInputFiles(final long handle);
+ private static native long numInputFilesAtOutputLevel(final long handle);
+ private static native long numOutputRecords(final long handle);
+ private static native long numOutputFiles(final long handle);
+ private static native boolean isManualCompaction(final long handle);
+ private static native long totalInputBytes(final long handle);
+ private static native long totalOutputBytes(final long handle);
+ private static native long numRecordsReplaced(final long handle);
+ private static native long totalInputRawKeyBytes(final long handle);
+ private static native long totalInputRawValueBytes(final long handle);
+ private static native long numInputDeletionRecords(final long handle);
+ private static native long numExpiredDeletionRecords(final long handle);
+ private static native long numCorruptKeys(final long handle);
+ private static native long fileWriteNanos(final long handle);
+ private static native long fileRangeSyncNanos(final long handle);
+ private static native long fileFsyncNanos(final long handle);
+ private static native long filePrepareWriteNanos(final long handle);
+ private static native byte[] smallestOutputKeyPrefix(final long handle);
+ private static native byte[] largestOutputKeyPrefix(final long handle);
+ private static native long numSingleDelFallthru(final long handle);
+ private static native long numSingleDelMismatch(final long handle);
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.List;
+
+/**
+ * CompactionOptions are used in
+ * {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int, CompactionJobInfo)}
+ * calls.
+ */
+public class CompactionOptions extends RocksObject {
+
+ public CompactionOptions() {
+ super(newCompactionOptions());
+ }
+
+ /**
+ * Get the compaction output compression type.
+ *
+ * See {@link #setCompression(CompressionType)}.
+ *
+ * @return the compression type.
+ */
+ public CompressionType compression() {
+ return CompressionType.getCompressionType(
+ compression(nativeHandle_));
+ }
+
+ /**
+ * Set the compaction output compression type.
+ *
+ * Default: snappy
+ *
+ * If set to {@link CompressionType#DISABLE_COMPRESSION_OPTION},
+ * RocksDB will choose compression type according to the
+ * {@link ColumnFamilyOptions#compressionType()}, taking into account
+ * the output level if {@link ColumnFamilyOptions#compressionPerLevel()}
+ * is specified.
+ *
+ * @param compression the compression type to use for compaction output.
+ *
+ * @return the instance of the current Options.
+ */
+ public CompactionOptions setCompression(final CompressionType compression) {
+ setCompression(nativeHandle_, compression.getValue());
+ return this;
+ }
+
+ /**
+ * Get the compaction output file size limit.
+ *
+ * See {@link #setOutputFileSizeLimit(long)}.
+ *
+ * @return the file size limit.
+ */
+ public long outputFileSizeLimit() {
+ return outputFileSizeLimit(nativeHandle_);
+ }
+
+ /**
+ * Compaction will create files of size {@link #outputFileSizeLimit()}.
+ *
+ * Default: 2^64-1, which means that compaction will create a single file
+ *
+ * @param outputFileSizeLimit the size limit
+ *
+ * @return the instance of the current Options.
+ */
+ public CompactionOptions setOutputFileSizeLimit(
+ final long outputFileSizeLimit) {
+ setOutputFileSizeLimit(nativeHandle_, outputFileSizeLimit);
+ return this;
+ }
+
+ /**
+ * Get the maximum number of threads that will concurrently perform a
+ * compaction job.
+ *
+ * @return the maximum number of threads.
+ */
+ public int maxSubcompactions() {
+ return maxSubcompactions(nativeHandle_);
+ }
+
+ /**
+ * This value represents the maximum number of threads that will
+ * concurrently perform a compaction job by breaking it into multiple,
+ * smaller ones that are run simultaneously.
+ *
+ * Default: 0 (i.e. no subcompactions)
+ *
+ * If > 0, it will replace the option in
+ * {@link DBOptions#maxSubcompactions()} for this compaction.
+ *
+ * @param maxSubcompactions The maximum number of threads that will
+ * concurrently perform a compaction job
+ *
+ * @return the instance of the current Options.
+ */
+ public CompactionOptions setMaxSubcompactions(final int maxSubcompactions) {
+ setMaxSubcompactions(nativeHandle_, maxSubcompactions);
+ return this;
+ }
+
+ private static native long newCompactionOptions();
+ @Override protected final native void disposeInternal(final long handle);
+
+ private static native byte compression(final long handle);
+ private static native void setCompression(final long handle,
+ final byte compressionTypeValue);
+ private static native long outputFileSizeLimit(final long handle);
+ private static native void setOutputFileSizeLimit(final long handle,
+ final long outputFileSizeLimit);
+ private static native int maxSubcompactions(final long handle);
+ private static native void setMaxSubcompactions(final long handle,
+ final int maxSubcompactions);
+}
*
* Default: false
*
- * @param allowCompaction should allow intra-L0 compaction?
+ * @param allowCompaction true to allow intra-L0 compaction
*
* @return the reference to the current options.
*/
- public CompactionOptionsFIFO setAllowCompaction(final boolean allowCompaction) {
+ public CompactionOptionsFIFO setAllowCompaction(
+ final boolean allowCompaction) {
setAllowCompaction(nativeHandle_, allowCompaction);
return this;
}
+
/**
* Check if intra-L0 compaction is enabled.
- * If true, try to do compaction to compact smaller files into larger ones.
- * Minimum files to compact follows options.level0_file_num_compaction_trigger
- * and compaction won't trigger if average compact bytes per del file is
- * larger than options.write_buffer_size. This is to protect large files
- * from being compacted again.
+ * When enabled, we try to compact smaller files into larger ones.
+ *
+ * See {@link #setAllowCompaction(boolean)}.
*
* Default: false
*
- * @return a boolean value indicating whether intra-L0 compaction is enabled
+ * @return true if intra-L0 compaction is enabled, false otherwise.
*/
public boolean allowCompaction() {
return allowCompaction(nativeHandle_);
}
- private native void setMaxTableFilesSize(long handle, long maxTableFilesSize);
- private native long maxTableFilesSize(long handle);
- private native void setAllowCompaction(long handle, boolean allowCompaction);
- private native boolean allowCompaction(long handle);
private native static long newCompactionOptionsFIFO();
@Override protected final native void disposeInternal(final long handle);
+
+ private native void setMaxTableFilesSize(final long handle,
+ final long maxTableFilesSize);
+ private native long maxTableFilesSize(final long handle);
+ private native void setAllowCompaction(final long handle,
+ final boolean allowCompaction);
+ private native boolean allowCompaction(final long handle);
}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public enum CompactionReason {
+ kUnknown((byte)0x0),
+
+ /**
+ * [Level] number of L0 files > level0_file_num_compaction_trigger
+ */
+ kLevelL0FilesNum((byte)0x1),
+
+ /**
+ * [Level] total size of level > MaxBytesForLevel()
+ */
+ kLevelMaxLevelSize((byte)0x2),
+
+ /**
+ * [Universal] Compacting for size amplification
+ */
+ kUniversalSizeAmplification((byte)0x3),
+
+ /**
+ * [Universal] Compacting for size ratio
+ */
+ kUniversalSizeRatio((byte)0x4),
+
+ /**
+ * [Universal] number of sorted runs > level0_file_num_compaction_trigger
+ */
+ kUniversalSortedRunNum((byte)0x5),
+
+ /**
+ * [FIFO] total size > max_table_files_size
+ */
+ kFIFOMaxSize((byte)0x6),
+
+ /**
+ * [FIFO] reduce number of files.
+ */
+ kFIFOReduceNumFiles((byte)0x7),
+
+ /**
+ * [FIFO] files with creation time < (current_time - interval)
+ */
+ kFIFOTtl((byte)0x8),
+
+ /**
+ * Manual compaction
+ */
+ kManualCompaction((byte)0x9),
+
+ /**
+ * DB::SuggestCompactRange() marked files for compaction.
+ *
+ * NOTE(review): value 0x10 is out of sequence with the neighbouring
+ * entries (0x9 above, 0x0A below) — confirm it matches the byte mapping
+ * used on the JNI/C++ side (portal.h) before relying on
+ * {@code fromValue} round-trips.
+ */
+ kFilesMarkedForCompaction((byte)0x10),
+
+ /**
+ * [Level] Automatic compaction within bottommost level to cleanup duplicate
+ * versions of same user key, usually due to a released snapshot.
+ */
+ kBottommostFiles((byte)0x0A),
+
+ /**
+ * Compaction based on TTL
+ */
+ kTtl((byte)0x0B),
+
+ /**
+ * According to the comments in flush_job.cc, RocksDB treats flush as
+ * a level 0 compaction in internal stats.
+ */
+ kFlush((byte)0x0C),
+
+ /**
+ * Compaction caused by external sst file ingestion
+ */
+ kExternalSstIngestion((byte)0x0D);
+
+ private final byte value;
+
+ CompactionReason(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Get the internal representation value.
+ *
+ * @return the internal representation value
+ */
+ byte getValue() {
+ return value;
+ }
+
+ /**
+ * Get the CompactionReason from the internal representation value.
+ *
+ * @return the compaction reason.
+ *
+ * @throws IllegalArgumentException if the value is unknown.
+ */
+ static CompactionReason fromValue(final byte value) {
+ for (final CompactionReason compactionReason : CompactionReason.values()) {
+ if(compactionReason.value == value) {
+ return compactionReason;
+ }
+ }
+
+ throw new IllegalArgumentException(
+ "Illegal value provided for CompactionReason: " + value);
+ }
+}
package org.rocksdb;
+import java.util.List;
+
/**
* Enum CompactionStyle
*
* compaction strategy. It is suited for keeping event log data with
* very low overhead (query log for example). It periodically deletes
* the old data, so it's basically a TTL compaction style.</li>
+ * <li><strong>NONE</strong> - Disable background compaction.
+ * Compaction jobs are submitted via
+ * {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int, CompactionJobInfo)}.</li>
* </ol>
*
* @see <a
* FIFO Compaction</a>
*/
public enum CompactionStyle {
- LEVEL((byte) 0),
- UNIVERSAL((byte) 1),
- FIFO((byte) 2);
+ LEVEL((byte) 0x0),
+ UNIVERSAL((byte) 0x1),
+ FIFO((byte) 0x2),
+ NONE((byte) 0x3);
- private final byte value_;
+ private final byte value;
- private CompactionStyle(byte value) {
- value_ = value;
+ CompactionStyle(final byte value) {
+ this.value = value;
}
/**
- * Returns the byte value of the enumerations value
+ * Get the internal representation value.
*
- * @return byte representation
+ * @return the internal representation value.
*/
+ //TODO(AR) should be made package-private
public byte getValue() {
- return value_;
+ return value;
+ }
+
+ /**
+ * Get the Compaction style from the internal representation value.
+ *
+ * @param value the internal representation value.
+ *
+ * @return the Compaction style
+ *
+ * @throws IllegalArgumentException if the value does not match a
+ * CompactionStyle
+ */
+ static CompactionStyle fromValue(final byte value)
+ throws IllegalArgumentException {
+ for (final CompactionStyle compactionStyle : CompactionStyle.values()) {
+ if (compactionStyle.value == value) {
+ return compactionStyle;
+ }
+ }
+ throw new IllegalArgumentException("Unknown value for CompactionStyle: "
+ + value);
}
}
return maxDictBytes(nativeHandle_);
}
+ /**
+ * Maximum size of training data passed to zstd's dictionary trainer. Using
+ * zstd's dictionary trainer can achieve even better compression ratio
+ * improvements than using {@link #setMaxDictBytes(int)} alone.
+ *
+ * The training data will be used to generate a dictionary
+ * of {@link #maxDictBytes()}.
+ *
+ * Default: 0.
+ *
+ * @param zstdMaxTrainBytes Maximum bytes to use for training ZStd.
+ *
+ * @return the reference to the current options
+ */
+ public CompressionOptions setZStdMaxTrainBytes(final int zstdMaxTrainBytes) {
+ setZstdMaxTrainBytes(nativeHandle_, zstdMaxTrainBytes);
+ return this;
+ }
+
+ /**
+ * Maximum size of training data passed to zstd's dictionary trainer.
+ *
+ * @return Maximum bytes to use for training ZStd
+ */
+ public int zstdMaxTrainBytes() {
+ return zstdMaxTrainBytes(nativeHandle_);
+ }
+
+ /**
+ * When the compression options are set by the user, this flag will be set
+ * to true.
+ * For bottommost_compression_opts, the user must set enabled=true to
+ * enable it; otherwise, bottommost compression will use compression_opts
+ * as its default compression options.
+ *
+ * For compression_opts, even if compression_opts.enabled=false, it is
+ * still used as the compression options for the compression process.
+ *
+ * Default: false.
+ *
+ * @param enabled true to use these compression options
+ * for the bottommost_compression_opts, false otherwise
+ *
+ * @return the reference to the current options
+ */
+ public CompressionOptions setEnabled(final boolean enabled) {
+ setEnabled(nativeHandle_, enabled);
+ return this;
+ }
+
+ /**
+ * Determine whether these compression options
+ * are used for the bottommost_compression_opts.
+ *
+ * @return true if these compression options are used
+ * for the bottommost_compression_opts, false otherwise
+ */
+ public boolean enabled() {
+ return enabled(nativeHandle_);
+ }
+
+
private native static long newCompressionOptions();
@Override protected final native void disposeInternal(final long handle);
private native int strategy(final long handle);
private native void setMaxDictBytes(final long handle, final int maxDictBytes);
private native int maxDictBytes(final long handle);
+ private native void setZstdMaxTrainBytes(final long handle,
+ final int zstdMaxTrainBytes);
+ private native int zstdMaxTrainBytes(final long handle);
+ private native void setEnabled(final long handle, final boolean enabled);
+ private native boolean enabled(final long handle);
}
* If {@link #dispose()} function is not called, then it will be GC'd
* automatically and native resources will be released as part of the process.
*/
-public class DBOptions
- extends RocksObject implements DBOptionsInterface<DBOptions> {
+public class DBOptions extends RocksObject
+ implements DBOptionsInterface<DBOptions>,
+ MutableDBOptionsInterface<DBOptions> {
static {
RocksDB.loadLibrary();
}
this.numShardBits_ = other.numShardBits_;
this.rateLimiter_ = other.rateLimiter_;
this.rowCache_ = other.rowCache_;
+ this.walFilter_ = other.walFilter_;
this.writeBufferManager_ = other.writeBufferManager_;
}
+ /**
+ * Constructor from Options
+ *
+ * @param options The options.
+ */
+ public DBOptions(final Options options) {
+ super(newDBOptionsFromOptions(options.nativeHandle_));
+ }
+
/**
* <p>Method to get a options instance by using pre-configured
* property values. If one or many values are undefined in
return createMissingColumnFamilies(nativeHandle_);
}
- @Override
- public DBOptions setEnv(final Env env) {
- setEnv(nativeHandle_, env.nativeHandle_);
- this.env_ = env;
- return this;
- }
-
- @Override
- public Env getEnv() {
- return env_;
- }
-
@Override
public DBOptions setErrorIfExists(
final boolean errorIfExists) {
return paranoidChecks(nativeHandle_);
}
+ @Override
+ public DBOptions setEnv(final Env env) {
+ setEnv(nativeHandle_, env.nativeHandle_);
+ this.env_ = env;
+ return this;
+ }
+
+ @Override
+ public Env getEnv() {
+ return env_;
+ }
+
@Override
public DBOptions setRateLimiter(final RateLimiter rateLimiter) {
assert(isOwningHandle());
assert(isOwningHandle());
final int len = dbPaths.size();
- final String paths[] = new String[len];
- final long targetSizes[] = new long[len];
+ final String[] paths = new String[len];
+ final long[] targetSizes = new long[len];
int i = 0;
for(final DbPath dbPath : dbPaths) {
if(len == 0) {
return Collections.emptyList();
} else {
- final String paths[] = new String[len];
- final long targetSizes[] = new long[len];
+ final String[] paths = new String[len];
+ final long[] targetSizes = new long[len];
dbPaths(nativeHandle_, paths, targetSizes);
return deleteObsoleteFilesPeriodMicros(nativeHandle_);
}
+ @Override
+ public DBOptions setMaxBackgroundJobs(final int maxBackgroundJobs) {
+ assert(isOwningHandle());
+ setMaxBackgroundJobs(nativeHandle_, maxBackgroundJobs);
+ return this;
+ }
+
+ @Override
+ public int maxBackgroundJobs() {
+ assert(isOwningHandle());
+ return maxBackgroundJobs(nativeHandle_);
+ }
+
@Override
public void setBaseBackgroundCompactions(
final int baseBackgroundCompactions) {
}
@Override
- public void setMaxSubcompactions(final int maxSubcompactions) {
+ public DBOptions setMaxSubcompactions(final int maxSubcompactions) {
assert(isOwningHandle());
setMaxSubcompactions(nativeHandle_, maxSubcompactions);
+ return this;
}
@Override
return maxBackgroundFlushes(nativeHandle_);
}
- @Override
- public DBOptions setMaxBackgroundJobs(final int maxBackgroundJobs) {
- assert(isOwningHandle());
- setMaxBackgroundJobs(nativeHandle_, maxBackgroundJobs);
- return this;
- }
-
- @Override
- public int maxBackgroundJobs() {
- assert(isOwningHandle());
- return maxBackgroundJobs(nativeHandle_);
- }
-
@Override
public DBOptions setMaxLogFileSize(final long maxLogFileSize) {
assert(isOwningHandle());
}
@Override
- public DBOptions setUseDirectReads(
- final boolean useDirectReads) {
+ public DBOptions setAllowMmapReads(
+ final boolean allowMmapReads) {
assert(isOwningHandle());
- setUseDirectReads(nativeHandle_, useDirectReads);
+ setAllowMmapReads(nativeHandle_, allowMmapReads);
return this;
}
@Override
- public boolean useDirectReads() {
+ public boolean allowMmapReads() {
assert(isOwningHandle());
- return useDirectReads(nativeHandle_);
+ return allowMmapReads(nativeHandle_);
}
@Override
- public DBOptions setUseDirectIoForFlushAndCompaction(
- final boolean useDirectIoForFlushAndCompaction) {
+ public DBOptions setAllowMmapWrites(
+ final boolean allowMmapWrites) {
assert(isOwningHandle());
- setUseDirectIoForFlushAndCompaction(nativeHandle_,
- useDirectIoForFlushAndCompaction);
+ setAllowMmapWrites(nativeHandle_, allowMmapWrites);
return this;
}
@Override
- public boolean useDirectIoForFlushAndCompaction() {
+ public boolean allowMmapWrites() {
assert(isOwningHandle());
- return useDirectIoForFlushAndCompaction(nativeHandle_);
+ return allowMmapWrites(nativeHandle_);
}
@Override
- public DBOptions setAllowFAllocate(final boolean allowFAllocate) {
+ public DBOptions setUseDirectReads(
+ final boolean useDirectReads) {
assert(isOwningHandle());
- setAllowFAllocate(nativeHandle_, allowFAllocate);
+ setUseDirectReads(nativeHandle_, useDirectReads);
return this;
}
@Override
- public boolean allowFAllocate() {
+ public boolean useDirectReads() {
assert(isOwningHandle());
- return allowFAllocate(nativeHandle_);
+ return useDirectReads(nativeHandle_);
}
@Override
- public DBOptions setAllowMmapReads(
- final boolean allowMmapReads) {
+ public DBOptions setUseDirectIoForFlushAndCompaction(
+ final boolean useDirectIoForFlushAndCompaction) {
assert(isOwningHandle());
- setAllowMmapReads(nativeHandle_, allowMmapReads);
+ setUseDirectIoForFlushAndCompaction(nativeHandle_,
+ useDirectIoForFlushAndCompaction);
return this;
}
@Override
- public boolean allowMmapReads() {
+ public boolean useDirectIoForFlushAndCompaction() {
assert(isOwningHandle());
- return allowMmapReads(nativeHandle_);
+ return useDirectIoForFlushAndCompaction(nativeHandle_);
}
@Override
- public DBOptions setAllowMmapWrites(
- final boolean allowMmapWrites) {
+ public DBOptions setAllowFAllocate(final boolean allowFAllocate) {
assert(isOwningHandle());
- setAllowMmapWrites(nativeHandle_, allowMmapWrites);
+ setAllowFAllocate(nativeHandle_, allowFAllocate);
return this;
}
@Override
- public boolean allowMmapWrites() {
+ public boolean allowFAllocate() {
assert(isOwningHandle());
- return allowMmapWrites(nativeHandle_);
+ return allowFAllocate(nativeHandle_);
}
@Override
return this.writeBufferManager_;
}
- @Override
+ @Override
public long dbWriteBufferSize() {
assert(isOwningHandle());
return dbWriteBufferSize(nativeHandle_);
return walBytesPerSync(nativeHandle_);
}
+ //TODO(AR) NOW
+// @Override
+// public DBOptions setListeners(final List<EventListener> listeners) {
+// assert(isOwningHandle());
+// final long[] eventListenerHandlers = new long[listeners.size()];
+// for (int i = 0; i < eventListenerHandlers.length; i++) {
+// eventListenerHandlers[i] = listeners.get(i).nativeHandle_;
+// }
+// setEventListeners(nativeHandle_, eventListenerHandlers);
+// return this;
+// }
+//
+// @Override
+// public Collection<EventListener> listeners() {
+// assert(isOwningHandle());
+// final long[] eventListenerHandlers = listeners(nativeHandle_);
+// if (eventListenerHandlers == null || eventListenerHandlers.length == 0) {
+// return Collections.emptyList();
+// }
+//
+// final List<EventListener> eventListeners = new ArrayList<>();
+// for (final long eventListenerHandle : eventListenerHandlers) {
+// eventListeners.add(new EventListener(eventListenerHandle)); //TODO(AR) check ownership is set to false!
+// }
+// return eventListeners;
+// }
+
@Override
public DBOptions setEnableThreadTracking(final boolean enableThreadTracking) {
assert(isOwningHandle());
return delayedWriteRate(nativeHandle_);
}
+ @Override
+ public DBOptions setEnablePipelinedWrite(final boolean enablePipelinedWrite) {
+ assert(isOwningHandle());
+ setEnablePipelinedWrite(nativeHandle_, enablePipelinedWrite);
+ return this;
+ }
+
+ @Override
+ public boolean enablePipelinedWrite() {
+ assert(isOwningHandle());
+ return enablePipelinedWrite(nativeHandle_);
+ }
+
@Override
public DBOptions setAllowConcurrentMemtableWrite(
final boolean allowConcurrentMemtableWrite) {
return this.rowCache_;
}
+ @Override
+ public DBOptions setWalFilter(final AbstractWalFilter walFilter) {
+ assert(isOwningHandle());
+ setWalFilter(nativeHandle_, walFilter.nativeHandle_);
+ this.walFilter_ = walFilter;
+ return this;
+ }
+
+ @Override
+ public WalFilter walFilter() {
+ assert(isOwningHandle());
+ return this.walFilter_;
+ }
+
@Override
public DBOptions setFailIfOptionsFileError(final boolean failIfOptionsFileError) {
assert(isOwningHandle());
return avoidFlushDuringShutdown(nativeHandle_);
}
+ @Override
+ public DBOptions setAllowIngestBehind(final boolean allowIngestBehind) {
+ assert(isOwningHandle());
+ setAllowIngestBehind(nativeHandle_, allowIngestBehind);
+ return this;
+ }
+
+ @Override
+ public boolean allowIngestBehind() {
+ assert(isOwningHandle());
+ return allowIngestBehind(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setPreserveDeletes(final boolean preserveDeletes) {
+ assert(isOwningHandle());
+ setPreserveDeletes(nativeHandle_, preserveDeletes);
+ return this;
+ }
+
+ @Override
+ public boolean preserveDeletes() {
+ assert(isOwningHandle());
+ return preserveDeletes(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setTwoWriteQueues(final boolean twoWriteQueues) {
+ assert(isOwningHandle());
+ setTwoWriteQueues(nativeHandle_, twoWriteQueues);
+ return this;
+ }
+
+ @Override
+ public boolean twoWriteQueues() {
+ assert(isOwningHandle());
+ return twoWriteQueues(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setManualWalFlush(final boolean manualWalFlush) {
+ assert(isOwningHandle());
+ setManualWalFlush(nativeHandle_, manualWalFlush);
+ return this;
+ }
+
+ @Override
+ public boolean manualWalFlush() {
+ assert(isOwningHandle());
+ return manualWalFlush(nativeHandle_);
+ }
+
+ @Override
+ public DBOptions setAtomicFlush(final boolean atomicFlush) {
+ setAtomicFlush(nativeHandle_, atomicFlush);
+ return this;
+ }
+
+ @Override
+ public boolean atomicFlush() {
+ return atomicFlush(nativeHandle_);
+ }
+
static final int DEFAULT_NUM_SHARD_BITS = -1;
private static native long getDBOptionsFromProps(
String optString);
- private native static long newDBOptions();
- private native static long copyDBOptions(long handle);
+ private static native long newDBOptions();
+ private static native long copyDBOptions(final long handle);
+ private static native long newDBOptionsFromOptions(final long optionsHandle);
@Override protected final native void disposeInternal(final long handle);
private native void optimizeForSmallDb(final long handle);
private native boolean enableThreadTracking(long handle);
private native void setDelayedWriteRate(long handle, long delayedWriteRate);
private native long delayedWriteRate(long handle);
+ private native void setEnablePipelinedWrite(final long handle,
+ final boolean enablePipelinedWrite);
+ private native boolean enablePipelinedWrite(final long handle);
private native void setAllowConcurrentMemtableWrite(long handle,
boolean allowConcurrentMemtableWrite);
private native boolean allowConcurrentMemtableWrite(long handle);
final boolean allow2pc);
private native boolean allow2pc(final long handle);
private native void setRowCache(final long handle,
- final long row_cache_handle);
+ final long rowCacheHandle);
+ private native void setWalFilter(final long handle,
+ final long walFilterHandle);
private native void setFailIfOptionsFileError(final long handle,
final boolean failIfOptionsFileError);
private native boolean failIfOptionsFileError(final long handle);
private native void setAvoidFlushDuringShutdown(final long handle,
final boolean avoidFlushDuringShutdown);
private native boolean avoidFlushDuringShutdown(final long handle);
+ private native void setAllowIngestBehind(final long handle,
+ final boolean allowIngestBehind);
+ private native boolean allowIngestBehind(final long handle);
+ private native void setPreserveDeletes(final long handle,
+ final boolean preserveDeletes);
+ private native boolean preserveDeletes(final long handle);
+ private native void setTwoWriteQueues(final long handle,
+ final boolean twoWriteQueues);
+ private native boolean twoWriteQueues(final long handle);
+ private native void setManualWalFlush(final long handle,
+ final boolean manualWalFlush);
+ private native boolean manualWalFlush(final long handle);
+ private native void setAtomicFlush(final long handle,
+ final boolean atomicFlush);
+ private native boolean atomicFlush(final long handle);
// instance variables
// NOTE: If you add new member variables, please update the copy constructor above!
private int numShardBits_;
private RateLimiter rateLimiter_;
private Cache rowCache_;
+ private WalFilter walFilter_;
private WriteBufferManager writeBufferManager_;
}
InfoLogLevel infoLogLevel();
/**
- * Number of open files that can be used by the DB. You may need to
- * increase this if your database has a large working set. Value -1 means
- * files opened are always kept open. You can estimate number of files based
- * on {@code target_file_size_base} and {@code target_file_size_multiplier}
- * for level-based compaction. For universal-style compaction, you can usually
- * set it to -1.
- * Default: 5000
- *
- * @param maxOpenFiles the maximum number of open files.
- * @return the instance of the current object.
- */
- T setMaxOpenFiles(int maxOpenFiles);
-
- /**
- * Number of open files that can be used by the DB. You may need to
- * increase this if your database has a large working set. Value -1 means
- * files opened are always kept open. You can estimate number of files based
- * on {@code target_file_size_base} and {@code target_file_size_multiplier}
- * for level-based compaction. For universal-style compaction, you can usually
- * set it to -1.
- *
- * @return the maximum number of open files.
- */
- int maxOpenFiles();
-
- /**
- * If {@link #maxOpenFiles()} is -1, DB will open all files on DB::Open(). You
- * can use this option to increase the number of threads used to open the
- * files.
+ * If {@link MutableDBOptionsInterface#maxOpenFiles()} is -1, DB will open
+ * all files on DB::Open(). You can use this option to increase the number
+ * of threads used to open the files.
*
* Default: 16
*
T setMaxFileOpeningThreads(int maxFileOpeningThreads);
/**
- * If {@link #maxOpenFiles()} is -1, DB will open all files on DB::Open(). You
- * can use this option to increase the number of threads used to open the
- * files.
+ * If {@link MutableDBOptionsInterface#maxOpenFiles()} is -1, DB will open all
+ * files on DB::Open(). You can use this option to increase the number of
+ * threads used to open the files.
*
* Default: 16
*
*/
int maxFileOpeningThreads();
- /**
- * <p>Once write-ahead logs exceed this size, we will start forcing the
- * flush of column families whose memtables are backed by the oldest live
- * WAL file (i.e. the ones that are causing all the space amplification).
- * </p>
- * <p>If set to 0 (default), we will dynamically choose the WAL size limit to
- * be [sum of all write_buffer_size * max_write_buffer_number] * 2</p>
- * <p>This option takes effect only when there are more than one column family as
- * otherwise the wal size is dictated by the write_buffer_size.</p>
- * <p>Default: 0</p>
- *
- * @param maxTotalWalSize max total wal size.
- * @return the instance of the current object.
- */
- T setMaxTotalWalSize(long maxTotalWalSize);
-
- /**
- * <p>Returns the max total wal size. Once write-ahead logs exceed this size,
- * we will start forcing the flush of column families whose memtables are
- * backed by the oldest live WAL file (i.e. the ones that are causing all
- * the space amplification).</p>
- *
- * <p>If set to 0 (default), we will dynamically choose the WAL size limit
- * to be [sum of all write_buffer_size * max_write_buffer_number] * 2
- * </p>
- *
- * @return max total wal size
- */
- long maxTotalWalSize();
-
/**
* <p>Sets the statistics object which collects metrics about database operations.
* Statistics objects should not be shared between DB instances as
*/
long deleteObsoleteFilesPeriodMicros();
- /**
- * Suggested number of concurrent background compaction jobs, submitted to
- * the default LOW priority thread pool.
- * Default: 1
- *
- * @param baseBackgroundCompactions Suggested number of background compaction
- * jobs
- *
- * @deprecated Use {@link #setMaxBackgroundJobs(int)}
- */
- void setBaseBackgroundCompactions(int baseBackgroundCompactions);
-
- /**
- * Suggested number of concurrent background compaction jobs, submitted to
- * the default LOW priority thread pool.
- * Default: 1
- *
- * @return Suggested number of background compaction jobs
- */
- int baseBackgroundCompactions();
-
- /**
- * Specifies the maximum number of concurrent background compaction jobs,
- * submitted to the default LOW priority thread pool.
- * If you're increasing this, also consider increasing number of threads in
- * LOW priority thread pool. For more information, see
- * Default: 1
- *
- * @param maxBackgroundCompactions the maximum number of background
- * compaction jobs.
- * @return the instance of the current object.
- *
- * @see RocksEnv#setBackgroundThreads(int)
- * @see RocksEnv#setBackgroundThreads(int, int)
- * @see #maxBackgroundFlushes()
- */
- T setMaxBackgroundCompactions(int maxBackgroundCompactions);
-
- /**
- * Returns the maximum number of concurrent background compaction jobs,
- * submitted to the default LOW priority thread pool.
- * When increasing this number, we may also want to consider increasing
- * number of threads in LOW priority thread pool.
- * Default: 1
- *
- * @return the maximum number of concurrent background compaction jobs.
- * @see RocksEnv#setBackgroundThreads(int)
- * @see RocksEnv#setBackgroundThreads(int, int)
- *
- * @deprecated Use {@link #setMaxBackgroundJobs(int)}
- */
- int maxBackgroundCompactions();
-
/**
* This value represents the maximum number of threads that will
* concurrently perform a compaction job by breaking it into multiple,
*
* @param maxSubcompactions The maximum number of threads that will
* concurrently perform a compaction job
+ *
+ * @return the instance of the current object.
*/
- void setMaxSubcompactions(int maxSubcompactions);
+ T setMaxSubcompactions(int maxSubcompactions);
/**
* This value represents the maximum number of threads that will
* @return the instance of the current object.
*
* @see RocksEnv#setBackgroundThreads(int)
- * @see RocksEnv#setBackgroundThreads(int, int)
- * @see #maxBackgroundCompactions()
+ * @see RocksEnv#setBackgroundThreads(int, Priority)
+ * @see MutableDBOptionsInterface#maxBackgroundCompactions()
*
- * @deprecated Use {@link #setMaxBackgroundJobs(int)}
+ * @deprecated Use {@link MutableDBOptionsInterface#setMaxBackgroundJobs(int)}
*/
+ @Deprecated
T setMaxBackgroundFlushes(int maxBackgroundFlushes);
/**
*
* @return the maximum number of concurrent background flush jobs.
* @see RocksEnv#setBackgroundThreads(int)
- * @see RocksEnv#setBackgroundThreads(int, int)
+ * @see RocksEnv#setBackgroundThreads(int, Priority)
*/
+ @Deprecated
int maxBackgroundFlushes();
- /**
- * Specifies the maximum number of concurrent background jobs (both flushes
- * and compactions combined).
- * Default: 2
- *
- * @param maxBackgroundJobs number of max concurrent background jobs
- * @return the instance of the current object.
- */
- T setMaxBackgroundJobs(int maxBackgroundJobs);
-
- /**
- * Returns the maximum number of concurrent background jobs (both flushes
- * and compactions combined).
- * Default: 2
- *
- * @return the maximum number of concurrent background jobs.
- */
- int maxBackgroundJobs();
-
/**
* Specifies the maximum size of a info log file. If the current log file
* is larger than `max_log_file_size`, a new info log file will
*/
boolean isFdCloseOnExec();
- /**
- * if not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
- * Default: 600 (10 minutes)
- *
- * @param statsDumpPeriodSec time interval in seconds.
- * @return the instance of the current object.
- */
- T setStatsDumpPeriodSec(int statsDumpPeriodSec);
-
- /**
- * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
- * Default: 600 (10 minutes)
- *
- * @return time interval in seconds.
- */
- int statsDumpPeriodSec();
-
/**
* If set true, will hint the underlying file system that the file
* access pattern is random, when a sst file is opened.
*/
boolean newTableReaderForCompactionInputs();
- /**
- * If non-zero, we perform bigger reads when doing compaction. If you're
- * running RocksDB on spinning disks, you should set this to at least 2MB.
- *
- * That way RocksDB's compaction is doing sequential instead of random reads.
- * When non-zero, we also force {@link #newTableReaderForCompactionInputs()}
- * to true.
- *
- * Default: 0
- *
- * @param compactionReadaheadSize The compaction read-ahead size
- *
- * @return the reference to the current options.
- */
- T setCompactionReadaheadSize(final long compactionReadaheadSize);
-
- /**
- * If non-zero, we perform bigger reads when doing compaction. If you're
- * running RocksDB on spinning disks, you should set this to at least 2MB.
- *
- * That way RocksDB's compaction is doing sequential instead of random reads.
- * When non-zero, we also force {@link #newTableReaderForCompactionInputs()}
- * to true.
- *
- * Default: 0
- *
- * @return The compaction read-ahead size
- */
- long compactionReadaheadSize();
-
/**
* This is a maximum buffer size that is used by WinMmapReadableFile in
* unbuffered disk I/O mode. We need to maintain an aligned buffer for
* for bigger requests allocate one shot buffers. In unbuffered mode we
* always bypass read-ahead buffer at ReadaheadRandomAccessFile
* When read-ahead is required we then make use of
- * {@link #compactionReadaheadSize()} value and always try to read ahead.
+ * {@link MutableDBOptionsInterface#compactionReadaheadSize()} value and
+ * always try to read ahead.
* With read-ahead we always pre-allocate buffer to the size instead of
* growing it up to a limit.
*
* for bigger requests allocate one shot buffers. In unbuffered mode we
* always bypass read-ahead buffer at ReadaheadRandomAccessFile
* When read-ahead is required we then make use of
- * {@link #compactionReadaheadSize()} value and always try to read ahead.
- * With read-ahead we always pre-allocate buffer to the size instead of
- * growing it up to a limit.
+ * {@link MutableDBOptionsInterface#compactionReadaheadSize()} value and
+ * always try to read ahead. With read-ahead we always pre-allocate buffer
+ * to the size instead of growing it up to a limit.
*
* This option is currently honored only on Windows
*
*/
long randomAccessMaxBufferSize();
- /**
- * This is the maximum buffer size that is used by WritableFileWriter.
- * On Windows, we need to maintain an aligned buffer for writes.
- * We allow the buffer to grow until it's size hits the limit.
- *
- * Default: 1024 * 1024 (1 MB)
- *
- * @param writableFileMaxBufferSize the maximum buffer size
- *
- * @return the reference to the current options.
- */
- T setWritableFileMaxBufferSize(long writableFileMaxBufferSize);
-
- /**
- * This is the maximum buffer size that is used by WritableFileWriter.
- * On Windows, we need to maintain an aligned buffer for writes.
- * We allow the buffer to grow until it's size hits the limit.
- *
- * Default: 1024 * 1024 (1 MB)
- *
- * @return the maximum buffer size
- */
- long writableFileMaxBufferSize();
-
/**
* Use adaptive mutex, which spins in the user space before resorting
* to kernel. This could reduce context switch when the mutex is not
*/
boolean useAdaptiveMutex();
- /**
- * Allows OS to incrementally sync files to disk while they are being
- * written, asynchronously, in the background.
- * Issue one request for every bytes_per_sync written. 0 turns it off.
- * Default: 0
- *
- * @param bytesPerSync size in bytes
- * @return the instance of the current object.
- */
- T setBytesPerSync(long bytesPerSync);
-
- /**
- * Allows OS to incrementally sync files to disk while they are being
- * written, asynchronously, in the background.
- * Issue one request for every bytes_per_sync written. 0 turns it off.
- * Default: 0
- *
- * @return size in bytes
- */
- long bytesPerSync();
-
- /**
- * Same as {@link #setBytesPerSync(long)} , but applies to WAL files
- *
- * Default: 0, turned off
- *
- * @param walBytesPerSync size in bytes
- * @return the instance of the current object.
- */
- T setWalBytesPerSync(long walBytesPerSync);
-
- /**
- * Same as {@link #bytesPerSync()} , but applies to WAL files
- *
- * Default: 0, turned off
- *
- * @return size in bytes
- */
- long walBytesPerSync();
+ //TODO(AR) NOW
+// /**
+// * Sets the {@link EventListener}s whose callback functions
+// * will be called when specific RocksDB event happens.
+// *
+// * @param listeners the listeners who should be notified on various events.
+// *
+// * @return the instance of the current object.
+// */
+// T setListeners(final List<EventListener> listeners);
+//
+// /**
+// * Gets the {@link EventListener}s whose callback functions
+// * will be called when specific RocksDB event happens.
+// *
+// * @return a collection of Event listeners.
+// */
+// Collection<EventListener> listeners();
/**
* If true, then the status of the threads involved in this DB will
boolean enableThreadTracking();
/**
- * The limited write rate to DB if
- * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or
- * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered,
- * or we are writing to the last mem table allowed and we allow more than 3
- * mem tables. It is calculated using size of user write requests before
- * compression. RocksDB may decide to slow down more if the compaction still
- * gets behind further.
+ * By default, a single write thread queue is maintained. The thread that
+ * gets to the head of the queue becomes the write batch group leader and is
+ * responsible for writing to the WAL and memtable for the batch group.
*
- * Unit: bytes per second.
+ * If {@link #enablePipelinedWrite()} is true, separate write thread queues
+ * are maintained for WAL writes and memtable writes. A write thread first
+ * enters the WAL writer queue and then the memtable writer queue. A pending
+ * thread on the WAL writer queue thus only has to wait for previous writers
+ * to finish their WAL writing, but not their memtable writing. Enabling the
+ * feature may improve write throughput and reduce latency of the prepare
+ * phase of two-phase commit.
*
- * Default: 16MB/s
+ * Default: false
*
- * @param delayedWriteRate the rate in bytes per second
+ * @param enablePipelinedWrite true to enable pipelined writes
*
* @return the reference to the current options.
*/
- T setDelayedWriteRate(long delayedWriteRate);
+ T setEnablePipelinedWrite(final boolean enablePipelinedWrite);
/**
- * The limited write rate to DB if
- * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or
- * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered,
- * or we are writing to the last mem table allowed and we allow more than 3
- * mem tables. It is calculated using size of user write requests before
- * compression. RocksDB may decide to slow down more if the compaction still
- * gets behind further.
- *
- * Unit: bytes per second.
+ * Returns true if pipelined writes are enabled.
+ * See {@link #setEnablePipelinedWrite(boolean)}.
*
- * Default: 16MB/s
- *
- * @return the rate in bytes per second
+ * @return true if pipelined writes are enabled, false otherwise.
*/
- long delayedWriteRate();
+ boolean enablePipelinedWrite();
/**
* If true, allow multi-writers to update mem tables in parallel.
*/
Cache rowCache();
+ /**
+ * A filter object supplied to be invoked while processing write-ahead-logs
+ * (WALs) during recovery. The filter provides a way to inspect log
+ * records, ignoring a particular record or skipping replay.
+ * The filter is invoked at startup and is invoked from a single-thread
+ * currently.
+ *
+ * @param walFilter the filter for processing WALs during recovery.
+ *
+ * @return the reference to the current options.
+ */
+ T setWalFilter(final AbstractWalFilter walFilter);
+
+ /**
+ * Gets the filter for processing WALs during recovery.
+ * See {@link #setWalFilter(AbstractWalFilter)}.
+ *
+ * @return the filter used for processing WALs during recovery.
+ */
+ WalFilter walFilter();
+
/**
* If true, then DB::Open / CreateColumnFamily / DropColumnFamily
* / SetOptions will fail if options file is not detected or properly
boolean avoidFlushDuringRecovery();
/**
- * By default RocksDB will flush all memtables on DB close if there are
- * unpersisted data (i.e. with WAL disabled) The flush can be skip to speedup
- * DB close. Unpersisted data WILL BE LOST.
+ * Set this option to true during creation of database if you want
+ * to be able to ingest behind (call IngestExternalFile() skipping keys
+ * that already exist, rather than overwriting matching keys).
+ * Setting this option to true has the following effects:
+ * 1) Disable some internal optimizations around SST file compression
+ * 2) Reserve bottom-most level for ingested files only.
+ * 3) Note that num_levels should be >= 3 if this option is turned on.
+ *
+ * DEFAULT: false
+ *
+ * @param allowIngestBehind true to allow ingest behind, false to disallow.
+ *
+ * @return the reference to the current options.
+ */
+ T setAllowIngestBehind(final boolean allowIngestBehind);
+
+ /**
+ * Returns true if ingest behind is allowed.
+ * See {@link #setAllowIngestBehind(boolean)}.
+ *
+ * @return true if ingest behind is allowed, false otherwise.
+ */
+ boolean allowIngestBehind();
+
+ /**
+ * Needed to support differential snapshots.
+ * If set to true then DB will only process deletes with sequence number
+ * less than what was set by SetPreserveDeletesSequenceNumber(uint64_t ts).
+ * Clients are responsible to periodically call this method to advance
+ * the cutoff time. If this method is never called and preserve_deletes
+ * is set to true NO deletes will ever be processed.
+ * At the moment this only keeps normal deletes, SingleDeletes will
+ * not be preserved.
*
* DEFAULT: false
*
- * Dynamically changeable through
- * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
- * API.
+ * @param preserveDeletes true to preserve deletes.
+ *
+ * @return the reference to the current options.
+ */
+ T setPreserveDeletes(final boolean preserveDeletes);
+
+ /**
+ * Returns true if deletes are preserved.
+ * See {@link #setPreserveDeletes(boolean)}.
+ *
+ * @return true if deletes are preserved, false otherwise.
+ */
+ boolean preserveDeletes();
+
+ /**
+ * If enabled it uses two queues for writes, one for the ones with
+ * disable_memtable and one for the ones that also write to memtable. This
+ * allows the memtable writes not to lag behind other writes. It can be used
+ * to optimize MySQL 2PC in which only the commits, which are serial, write to
+ * memtable.
+ *
+ * DEFAULT: false
*
- * @param avoidFlushDuringShutdown true if we should avoid flush during
- * shutdown
+ * @param twoWriteQueues true to enable two write queues, false otherwise.
*
* @return the reference to the current options.
*/
- T setAvoidFlushDuringShutdown(boolean avoidFlushDuringShutdown);
+ T setTwoWriteQueues(final boolean twoWriteQueues);
/**
- * By default RocksDB will flush all memtables on DB close if there are
- * unpersisted data (i.e. with WAL disabled) The flush can be skip to speedup
- * DB close. Unpersisted data WILL BE LOST.
+ * Returns true if two write queues are enabled.
+ *
+ * @return true if two write queues are enabled, false otherwise.
+ */
+ boolean twoWriteQueues();
+
+ /**
+ * If true WAL is not flushed automatically after each write. Instead it
+ * relies on manual invocation of FlushWAL to write the WAL buffer to its
+ * file.
*
* DEFAULT: false
*
- * Dynamically changeable through
- * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
- * API.
+ * @param manualWalFlush true to disable automatic WAL flushing,
+ * false otherwise.
+ *
+ * @return the reference to the current options.
+ */
+ T setManualWalFlush(final boolean manualWalFlush);
+
+ /**
+ * Returns true if automatic WAL flushing is disabled.
+ * See {@link #setManualWalFlush(boolean)}.
+ *
+ * @return true if automatic WAL flushing is disabled, false otherwise.
+ */
+ boolean manualWalFlush();
+
+ /**
+ * If true, RocksDB supports flushing multiple column families and committing
+ * their results atomically to MANIFEST. Note that it is not
+ * necessary to set atomic_flush to true if WAL is always enabled since WAL
+ * allows the database to be restored to the last persistent state in WAL.
+ * This option is useful when there are column families with writes NOT
+ * protected by WAL.
+ * For manual flush, application has to specify which column families to
+ * flush atomically in {@link RocksDB#flush(FlushOptions, List)}.
+ * For auto-triggered flush, RocksDB atomically flushes ALL column families.
+ *
+ * Currently, any WAL-enabled writes after atomic flush may be replayed
+ * independently if the process crashes later and tries to recover.
+ *
+ * @param atomicFlush true to enable atomic flush of multiple column families.
+ *
+ * @return the reference to the current options.
+ */
+ T setAtomicFlush(final boolean atomicFlush);
+
+ /**
+ * Determine if atomic flush of multiple column families is enabled.
+ *
+ * See {@link #setAtomicFlush(boolean)}.
*
- * @return true if we should avoid flush during shutdown
+ * @return true if atomic flush is enabled.
*/
- boolean avoidFlushDuringShutdown();
+ boolean atomicFlush();
}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+
+/**
+ * DataBlockIndexType used in conjunction with BlockBasedTable.
+ */
+public enum DataBlockIndexType {
+ /**
+ * traditional block type
+ */
+ kDataBlockBinarySearch((byte)0x0),
+
+ /**
+ * additional hash index
+ */
+ kDataBlockBinaryAndHash((byte)0x1);
+
+ private final byte value;
+
+ DataBlockIndexType(final byte value) {
+ this.value = value;
+ }
+
+ byte getValue() {
+ return value;
+ }
+}
package org.rocksdb;
+import java.util.Arrays;
+import java.util.List;
+
/**
* Base class for all Env implementations in RocksDB.
*/
public abstract class Env extends RocksObject {
- public static final int FLUSH_POOL = 0;
- public static final int COMPACTION_POOL = 1;
+
+ private static final Env DEFAULT_ENV = new RocksEnv(getDefaultEnvInternal());
+ static {
+ /**
+ * The Ownership of the Default Env belongs to C++
+ * and so we disown the native handle here so that
+ * we cannot accidentally free it from Java.
+ */
+ DEFAULT_ENV.disOwnNativeHandle();
+ }
/**
* <p>Returns the default environment suitable for the current operating
*
* <p>The result of {@code getDefault()} is a singleton whose ownership
* belongs to rocksdb c++. As a result, the returned RocksEnv will not
- * have the ownership of its c++ resource, and calling its dispose()
+ * have the ownership of its c++ resource, and calling its dispose()/close()
* will be no-op.</p>
*
* @return the default {@link org.rocksdb.RocksEnv} instance.
*/
public static Env getDefault() {
- return default_env_;
+ return DEFAULT_ENV;
}
/**
* for this environment.</p>
* <p>Default number: 1</p>
*
- * @param num the number of threads
+ * @param number the number of threads
*
* @return current {@link RocksEnv} instance.
*/
- public Env setBackgroundThreads(final int num) {
- return setBackgroundThreads(num, FLUSH_POOL);
+ public Env setBackgroundThreads(final int number) {
+ return setBackgroundThreads(number, Priority.LOW);
+ }
+
+ /**
+ * <p>Gets the number of background worker threads of the pool
+ * for this environment.</p>
+ *
+ * @return the number of threads.
+ */
+ public int getBackgroundThreads(final Priority priority) {
+ return getBackgroundThreads(nativeHandle_, priority.getValue());
}
/**
* <p>Sets the number of background worker threads of the specified thread
* pool for this environment.</p>
*
- * @param num the number of threads
- * @param poolID the id to specified a thread pool. Should be either
- * FLUSH_POOL or COMPACTION_POOL.
+ * @param number the number of threads
+ * @param priority the priority id of a specified thread pool.
*
* <p>Default number: 1</p>
* @return current {@link RocksEnv} instance.
*/
- public Env setBackgroundThreads(final int num, final int poolID) {
- setBackgroundThreads(nativeHandle_, num, poolID);
+ public Env setBackgroundThreads(final int number, final Priority priority) {
+ setBackgroundThreads(nativeHandle_, number, priority.getValue());
return this;
}
* <p>Returns the length of the queue associated with the specified
* thread pool.</p>
*
- * @param poolID the id to specified a thread pool. Should be either
- * FLUSH_POOL or COMPACTION_POOL.
+ * @param priority the priority id of a specified thread pool.
*
* @return the thread pool queue length.
*/
- public int getThreadPoolQueueLen(final int poolID) {
- return getThreadPoolQueueLen(nativeHandle_, poolID);
+ public int getThreadPoolQueueLen(final Priority priority) {
+ return getThreadPoolQueueLen(nativeHandle_, priority.getValue());
}
+ /**
+ * Enlarge number of background worker threads of a specific thread pool
+ * for this environment if it is smaller than specified. 'LOW' is the default
+ * pool.
+ *
+ * @param number the number of threads.
+ *
+ * @return current {@link RocksEnv} instance.
+ */
+ public Env incBackgroundThreadsIfNeeded(final int number,
+ final Priority priority) {
+ incBackgroundThreadsIfNeeded(nativeHandle_, number, priority.getValue());
+ return this;
+ }
- protected Env(final long nativeHandle) {
- super(nativeHandle);
+ /**
+ * Lower IO priority for threads from the specified pool.
+ *
+ * @param priority the priority id of a specified thread pool.
+ */
+ public Env lowerThreadPoolIOPriority(final Priority priority) {
+ lowerThreadPoolIOPriority(nativeHandle_, priority.getValue());
+ return this;
}
- static {
- default_env_ = new RocksEnv(getDefaultEnvInternal());
+ /**
+ * Lower CPU priority for threads from the specified pool.
+ *
+ * @param priority the priority id of a specified thread pool.
+ */
+ public Env lowerThreadPoolCPUPriority(final Priority priority) {
+ lowerThreadPoolCPUPriority(nativeHandle_, priority.getValue());
+ return this;
}
/**
- * <p>The static default Env. The ownership of its native handle
- * belongs to rocksdb c++ and is not able to be released on the Java
- * side.</p>
+ * Returns the status of all threads that belong to the current Env.
+ *
+   * @return the status of all threads that belong to this env.
*/
- static Env default_env_;
+ public List<ThreadStatus> getThreadList() throws RocksDBException {
+ return Arrays.asList(getThreadList(nativeHandle_));
+ }
+
+ Env(final long nativeHandle) {
+ super(nativeHandle);
+ }
private static native long getDefaultEnvInternal();
private native void setBackgroundThreads(
- long handle, int num, int priority);
- private native int getThreadPoolQueueLen(long handle, int poolID);
+ final long handle, final int number, final byte priority);
+ private native int getBackgroundThreads(final long handle,
+ final byte priority);
+ private native int getThreadPoolQueueLen(final long handle,
+ final byte priority);
+ private native void incBackgroundThreadsIfNeeded(final long handle,
+ final int number, final byte priority);
+ private native void lowerThreadPoolIOPriority(final long handle,
+ final byte priority);
+ private native void lowerThreadPoolCPUPriority(final long handle,
+ final byte priority);
+ private native ThreadStatus[] getThreadList(final long handle)
+ throws RocksDBException;
}
package org.rocksdb;
+/**
+ * Options while opening a file to read/write
+ */
public class EnvOptions extends RocksObject {
static {
RocksDB.loadLibrary();
}
+ /**
+ * Construct with default Options
+ */
public EnvOptions() {
super(newEnvOptions());
}
- public EnvOptions setUseOsBuffer(final boolean useOsBuffer) {
- setUseOsBuffer(nativeHandle_, useOsBuffer);
- return this;
- }
-
- public boolean useOsBuffer() {
- assert(isOwningHandle());
- return useOsBuffer(nativeHandle_);
+ /**
+ * Construct from {@link DBOptions}.
+ *
+ * @param dbOptions the database options.
+ */
+ public EnvOptions(final DBOptions dbOptions) {
+ super(newEnvOptions(dbOptions.nativeHandle_));
}
+ /**
+ * Enable/Disable memory mapped reads.
+ *
+ * Default: false
+ *
+ * @param useMmapReads true to enable memory mapped reads, false to disable.
+ *
+ * @return the reference to these options.
+ */
public EnvOptions setUseMmapReads(final boolean useMmapReads) {
setUseMmapReads(nativeHandle_, useMmapReads);
return this;
}
+ /**
+ * Determine if memory mapped reads are in-use.
+ *
+ * @return true if memory mapped reads are in-use, false otherwise.
+ */
public boolean useMmapReads() {
assert(isOwningHandle());
return useMmapReads(nativeHandle_);
}
+ /**
+ * Enable/Disable memory mapped Writes.
+ *
+ * Default: true
+ *
+ * @param useMmapWrites true to enable memory mapped writes, false to disable.
+ *
+ * @return the reference to these options.
+ */
public EnvOptions setUseMmapWrites(final boolean useMmapWrites) {
setUseMmapWrites(nativeHandle_, useMmapWrites);
return this;
}
+ /**
+ * Determine if memory mapped writes are in-use.
+ *
+ * @return true if memory mapped writes are in-use, false otherwise.
+ */
public boolean useMmapWrites() {
assert(isOwningHandle());
return useMmapWrites(nativeHandle_);
}
+ /**
+ * Enable/Disable direct reads, i.e. {@code O_DIRECT}.
+ *
+ * Default: false
+ *
+ * @param useDirectReads true to enable direct reads, false to disable.
+ *
+ * @return the reference to these options.
+ */
public EnvOptions setUseDirectReads(final boolean useDirectReads) {
setUseDirectReads(nativeHandle_, useDirectReads);
return this;
}
+ /**
+ * Determine if direct reads are in-use.
+ *
+ * @return true if direct reads are in-use, false otherwise.
+ */
public boolean useDirectReads() {
assert(isOwningHandle());
return useDirectReads(nativeHandle_);
}
+ /**
+ * Enable/Disable direct writes, i.e. {@code O_DIRECT}.
+ *
+ * Default: false
+ *
+ * @param useDirectWrites true to enable direct writes, false to disable.
+ *
+ * @return the reference to these options.
+ */
public EnvOptions setUseDirectWrites(final boolean useDirectWrites) {
setUseDirectWrites(nativeHandle_, useDirectWrites);
return this;
}
+ /**
+ * Determine if direct writes are in-use.
+ *
+ * @return true if direct writes are in-use, false otherwise.
+ */
public boolean useDirectWrites() {
assert(isOwningHandle());
return useDirectWrites(nativeHandle_);
}
+ /**
+ * Enable/Disable fallocate calls.
+ *
+ * Default: true
+ *
+ * If false, {@code fallocate()} calls are bypassed.
+ *
+ * @param allowFallocate true to enable fallocate calls, false to disable.
+ *
+ * @return the reference to these options.
+ */
public EnvOptions setAllowFallocate(final boolean allowFallocate) {
setAllowFallocate(nativeHandle_, allowFallocate);
return this;
}
+ /**
+ * Determine if fallocate calls are used.
+ *
+ * @return true if fallocate calls are used, false otherwise.
+ */
public boolean allowFallocate() {
assert(isOwningHandle());
return allowFallocate(nativeHandle_);
}
+ /**
+ * Enable/Disable the {@code FD_CLOEXEC} bit when opening file descriptors.
+ *
+ * Default: true
+ *
+   * @param setFdCloexec true to enable the {@code FD_CLOEXEC} bit,
+ * false to disable.
+ *
+ * @return the reference to these options.
+ */
public EnvOptions setSetFdCloexec(final boolean setFdCloexec) {
setSetFdCloexec(nativeHandle_, setFdCloexec);
return this;
}
+ /**
+   * Determine if the {@code FD_CLOEXEC} bit is set when opening file
+   * descriptors.
+   *
+   * @return true if the {@code FD_CLOEXEC} bit is enabled, false otherwise.
+ */
public boolean setFdCloexec() {
assert(isOwningHandle());
return setFdCloexec(nativeHandle_);
}
+ /**
+ * Allows OS to incrementally sync files to disk while they are being
+ * written, in the background. Issue one request for every
+ * {@code bytesPerSync} written.
+ *
+ * Default: 0
+ *
+ * @param bytesPerSync 0 to disable, otherwise the number of bytes.
+ *
+ * @return the reference to these options.
+ */
public EnvOptions setBytesPerSync(final long bytesPerSync) {
setBytesPerSync(nativeHandle_, bytesPerSync);
return this;
}
+ /**
+ * Get the number of incremental bytes per sync written in the background.
+ *
+ * @return 0 if disabled, otherwise the number of bytes.
+ */
public long bytesPerSync() {
assert(isOwningHandle());
return bytesPerSync(nativeHandle_);
}
- public EnvOptions setFallocateWithKeepSize(final boolean fallocateWithKeepSize) {
+ /**
+ * If true, we will preallocate the file with {@code FALLOC_FL_KEEP_SIZE}
+ * flag, which means that file size won't change as part of preallocation.
+ * If false, preallocation will also change the file size. This option will
+ * improve the performance in workloads where you sync the data on every
+ * write. By default, we set it to true for MANIFEST writes and false for
+ * WAL writes
+ *
+ * @param fallocateWithKeepSize true to preallocate, false otherwise.
+ *
+ * @return the reference to these options.
+ */
+ public EnvOptions setFallocateWithKeepSize(
+ final boolean fallocateWithKeepSize) {
setFallocateWithKeepSize(nativeHandle_, fallocateWithKeepSize);
return this;
}
+ /**
+ * Determine if file is preallocated.
+ *
+ * @return true if the file is preallocated, false otherwise.
+ */
public boolean fallocateWithKeepSize() {
assert(isOwningHandle());
return fallocateWithKeepSize(nativeHandle_);
}
- public EnvOptions setCompactionReadaheadSize(final long compactionReadaheadSize) {
+ /**
+ * See {@link DBOptions#setCompactionReadaheadSize(long)}.
+ *
+ * @param compactionReadaheadSize the compaction read-ahead size.
+ *
+ * @return the reference to these options.
+ */
+ public EnvOptions setCompactionReadaheadSize(
+ final long compactionReadaheadSize) {
setCompactionReadaheadSize(nativeHandle_, compactionReadaheadSize);
return this;
}
+ /**
+ * See {@link DBOptions#compactionReadaheadSize()}.
+ *
+ * @return the compaction read-ahead size.
+ */
public long compactionReadaheadSize() {
assert(isOwningHandle());
return compactionReadaheadSize(nativeHandle_);
}
- public EnvOptions setRandomAccessMaxBufferSize(final long randomAccessMaxBufferSize) {
+ /**
+ * See {@link DBOptions#setRandomAccessMaxBufferSize(long)}.
+ *
+ * @param randomAccessMaxBufferSize the max buffer size for random access.
+ *
+ * @return the reference to these options.
+ */
+ public EnvOptions setRandomAccessMaxBufferSize(
+ final long randomAccessMaxBufferSize) {
setRandomAccessMaxBufferSize(nativeHandle_, randomAccessMaxBufferSize);
return this;
}
+ /**
+ * See {@link DBOptions#randomAccessMaxBufferSize()}.
+ *
+ * @return the max buffer size for random access.
+ */
public long randomAccessMaxBufferSize() {
assert(isOwningHandle());
return randomAccessMaxBufferSize(nativeHandle_);
}
- public EnvOptions setWritableFileMaxBufferSize(final long writableFileMaxBufferSize) {
+ /**
+ * See {@link DBOptions#setWritableFileMaxBufferSize(long)}.
+ *
+ * @param writableFileMaxBufferSize the max buffer size.
+ *
+ * @return the reference to these options.
+ */
+ public EnvOptions setWritableFileMaxBufferSize(
+ final long writableFileMaxBufferSize) {
setWritableFileMaxBufferSize(nativeHandle_, writableFileMaxBufferSize);
return this;
}
+ /**
+ * See {@link DBOptions#writableFileMaxBufferSize()}.
+ *
+ * @return the max buffer size.
+ */
public long writableFileMaxBufferSize() {
assert(isOwningHandle());
return writableFileMaxBufferSize(nativeHandle_);
}
+ /**
+ * Set the write rate limiter for flush and compaction.
+ *
+ * @param rateLimiter the rate limiter.
+ *
+ * @return the reference to these options.
+ */
public EnvOptions setRateLimiter(final RateLimiter rateLimiter) {
this.rateLimiter = rateLimiter;
setRateLimiter(nativeHandle_, rateLimiter.nativeHandle_);
return this;
}
+ /**
+ * Get the write rate limiter for flush and compaction.
+ *
+ * @return the rate limiter.
+ */
public RateLimiter rateLimiter() {
assert(isOwningHandle());
return rateLimiter;
}
private native static long newEnvOptions();
-
+ private native static long newEnvOptions(final long dboptions_handle);
@Override protected final native void disposeInternal(final long handle);
- private native void setUseOsBuffer(final long handle, final boolean useOsBuffer);
-
- private native boolean useOsBuffer(final long handle);
-
- private native void setUseMmapReads(final long handle, final boolean useMmapReads);
-
+ private native void setUseMmapReads(final long handle,
+ final boolean useMmapReads);
private native boolean useMmapReads(final long handle);
-
- private native void setUseMmapWrites(final long handle, final boolean useMmapWrites);
-
+ private native void setUseMmapWrites(final long handle,
+ final boolean useMmapWrites);
private native boolean useMmapWrites(final long handle);
-
- private native void setUseDirectReads(final long handle, final boolean useDirectReads);
-
+ private native void setUseDirectReads(final long handle,
+ final boolean useDirectReads);
private native boolean useDirectReads(final long handle);
-
- private native void setUseDirectWrites(final long handle, final boolean useDirectWrites);
-
+ private native void setUseDirectWrites(final long handle,
+ final boolean useDirectWrites);
private native boolean useDirectWrites(final long handle);
-
- private native void setAllowFallocate(final long handle, final boolean allowFallocate);
-
+ private native void setAllowFallocate(final long handle,
+ final boolean allowFallocate);
private native boolean allowFallocate(final long handle);
-
- private native void setSetFdCloexec(final long handle, final boolean setFdCloexec);
-
+ private native void setSetFdCloexec(final long handle,
+ final boolean setFdCloexec);
private native boolean setFdCloexec(final long handle);
-
- private native void setBytesPerSync(final long handle, final long bytesPerSync);
-
+ private native void setBytesPerSync(final long handle,
+ final long bytesPerSync);
private native long bytesPerSync(final long handle);
-
private native void setFallocateWithKeepSize(
final long handle, final boolean fallocateWithKeepSize);
-
private native boolean fallocateWithKeepSize(final long handle);
-
private native void setCompactionReadaheadSize(
final long handle, final long compactionReadaheadSize);
-
private native long compactionReadaheadSize(final long handle);
-
private native void setRandomAccessMaxBufferSize(
final long handle, final long randomAccessMaxBufferSize);
-
private native long randomAccessMaxBufferSize(final long handle);
-
private native void setWritableFileMaxBufferSize(
final long handle, final long writableFileMaxBufferSize);
-
private native long writableFileMaxBufferSize(final long handle);
-
- private native void setRateLimiter(final long handle, final long rateLimiterHandle);
-
+ private native void setRateLimiter(final long handle,
+ final long rateLimiterHandle);
private RateLimiter rateLimiter;
}
* number of disk seeks form a handful to a single disk seek per
* DB::Get() call.
*/
+//TODO(AR) should be renamed FilterPolicy
public abstract class Filter extends RocksObject {
protected Filter(final long nativeHandle) {
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
package org.rocksdb;
/**
return waitForFlush(nativeHandle_);
}
+ /**
+   * Set to true so that flush would proceed immediately even if it means
+ * writes will stall for the duration of the flush.
+ *
+ * Set to false so that the operation will wait until it's possible to do
+ * the flush without causing stall or until required flush is performed by
+ * someone else (foreground call or background thread).
+ *
+ * Default: false
+ *
+ * @param allowWriteStall true to allow writes to stall for flush, false
+ * otherwise.
+ *
+ * @return instance of current FlushOptions.
+ */
+ public FlushOptions setAllowWriteStall(final boolean allowWriteStall) {
+ assert(isOwningHandle());
+ setAllowWriteStall(nativeHandle_, allowWriteStall);
+ return this;
+ }
+
+ /**
+ * Returns true if writes are allowed to stall for flushes to complete, false
+ * otherwise.
+ *
+ * @return true if writes are allowed to stall for flushes
+ */
+ public boolean allowWriteStall() {
+ assert(isOwningHandle());
+ return allowWriteStall(nativeHandle_);
+ }
+
private native static long newFlushOptions();
@Override protected final native void disposeInternal(final long handle);
- private native void setWaitForFlush(long handle,
- boolean wait);
- private native boolean waitForFlush(long handle);
+
+ private native void setWaitForFlush(final long handle,
+ final boolean wait);
+ private native boolean waitForFlush(final long handle);
+ private native void setAllowWriteStall(final long handle,
+ final boolean allowWriteStall);
+ private native boolean allowWriteStall(final long handle);
}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * HDFS environment.
+ */
+public class HdfsEnv extends Env {
+
+ /**
+   * <p>Creates a new environment that is used for HDFS environment.</p>
+ *
+ * <p>The caller must delete the result when it is
+ * no longer needed.</p>
+ *
+ * @param fsName the HDFS as a string in the form "hdfs://hostname:port/"
+ */
+ public HdfsEnv(final String fsName) {
+ super(createHdfsEnv(fsName));
+ }
+
+ private static native long createHdfsEnv(final String fsName);
+ @Override protected final native void disposeInternal(final long handle);
+}
return value_;
}
- private IndexType(byte value) {
+ IndexType(byte value) {
value_ = value;
}
import java.util.List;
/**
- * IngestExternalFileOptions is used by {@link RocksDB#ingestExternalFile(ColumnFamilyHandle, List, IngestExternalFileOptions)}
+ * IngestExternalFileOptions is used by
+ * {@link RocksDB#ingestExternalFile(ColumnFamilyHandle, List, IngestExternalFileOptions)}.
*/
public class IngestExternalFileOptions extends RocksObject {
* Can be set to true to move the files instead of copying them.
*
* @param moveFiles true if files should be moved instead of copied
+ *
+ * @return the reference to the current IngestExternalFileOptions.
*/
- public void setMoveFiles(final boolean moveFiles) {
+ public IngestExternalFileOptions setMoveFiles(final boolean moveFiles) {
setMoveFiles(nativeHandle_, moveFiles);
+ return this;
}
/**
* that where created before the file was ingested.
*
* @param snapshotConsistency true if snapshot consistency is required
+ *
+ * @return the reference to the current IngestExternalFileOptions.
*/
- public void setSnapshotConsistency(final boolean snapshotConsistency) {
+ public IngestExternalFileOptions setSnapshotConsistency(
+ final boolean snapshotConsistency) {
setSnapshotConsistency(nativeHandle_, snapshotConsistency);
+ return this;
}
/**
* will fail if the file key range overlaps with existing keys or tombstones in the DB.
*
* @param allowGlobalSeqNo true if global seq numbers are required
+ *
+ * @return the reference to the current IngestExternalFileOptions.
*/
- public void setAllowGlobalSeqNo(final boolean allowGlobalSeqNo) {
+ public IngestExternalFileOptions setAllowGlobalSeqNo(
+ final boolean allowGlobalSeqNo) {
setAllowGlobalSeqNo(nativeHandle_, allowGlobalSeqNo);
+ return this;
}
/**
* (memtable flush required), IngestExternalFile will fail.
*
* @param allowBlockingFlush true if blocking flushes are allowed
+ *
+ * @return the reference to the current IngestExternalFileOptions.
*/
- public void setAllowBlockingFlush(final boolean allowBlockingFlush) {
+ public IngestExternalFileOptions setAllowBlockingFlush(
+ final boolean allowBlockingFlush) {
setAllowBlockingFlush(nativeHandle_, allowBlockingFlush);
+ return this;
+ }
+
+ /**
+ * Returns true if duplicate keys in the file being ingested are
+ * to be skipped rather than overwriting existing data under that key.
+ *
+ * @return true if duplicate keys in the file being ingested are to be
+ * skipped, false otherwise.
+ */
+ public boolean ingestBehind() {
+ return ingestBehind(nativeHandle_);
+ }
+
+ /**
+ * Set to true if you would like duplicate keys in the file being ingested
+ * to be skipped rather than overwriting existing data under that key.
+ *
+ * Usecase: back-fill of some historical data in the database without
+ * over-writing existing newer version of data.
+ *
+ * This option could only be used if the DB has been running
+ * with DBOptions#allowIngestBehind() == true since the dawn of time.
+ *
+ * All files will be ingested at the bottommost level with seqno=0.
+ *
+ * Default: false
+ *
+ * @param ingestBehind true if you would like duplicate keys in the file being
+ * ingested to be skipped.
+ *
+ * @return the reference to the current IngestExternalFileOptions.
+ */
+ public IngestExternalFileOptions setIngestBehind(final boolean ingestBehind) {
+ setIngestBehind(nativeHandle_, ingestBehind);
+ return this;
+ }
+
+ /**
+   * Returns true if the global_seqno is written to a given offset
+ * in the external SST file for backward compatibility.
+ *
+ * See {@link #setWriteGlobalSeqno(boolean)}.
+ *
+ * @return true if the global_seqno is written to a given offset,
+ * false otherwise.
+ */
+ public boolean writeGlobalSeqno() {
+ return writeGlobalSeqno(nativeHandle_);
+ }
+
+ /**
+ * Set to true if you would like to write the global_seqno to a given offset
+ * in the external SST file for backward compatibility.
+ *
+ * Older versions of RocksDB write the global_seqno to a given offset within
+ * the ingested SST files, and new versions of RocksDB do not.
+ *
+ * If you ingest an external SST using new version of RocksDB and would like
+ * to be able to downgrade to an older version of RocksDB, you should set
+ * {@link #writeGlobalSeqno()} to true.
+ *
+ * If your service is just starting to use the new RocksDB, we recommend that
+ * you set this option to false, which brings two benefits:
+ * 1. No extra random write for global_seqno during ingestion.
+ * 2. Without writing external SST file, it's possible to do checksum.
+ *
+ * We have a plan to set this option to false by default in the future.
+ *
+ * Default: true
+ *
+   * @param writeGlobalSeqno true to write the global_seqno to a given offset,
+ * false otherwise
+ *
+ * @return the reference to the current IngestExternalFileOptions.
+ */
+ public IngestExternalFileOptions setWriteGlobalSeqno(
+ final boolean writeGlobalSeqno) {
+ setWriteGlobalSeqno(nativeHandle_, writeGlobalSeqno);
+ return this;
}
private native static long newIngestExternalFileOptions();
private native static long newIngestExternalFileOptions(
final boolean moveFiles, final boolean snapshotConsistency,
final boolean allowGlobalSeqNo, final boolean allowBlockingFlush);
+ @Override protected final native void disposeInternal(final long handle);
+
private native boolean moveFiles(final long handle);
private native void setMoveFiles(final long handle, final boolean move_files);
private native boolean snapshotConsistency(final long handle);
private native boolean allowBlockingFlush(final long handle);
private native void setAllowBlockingFlush(final long handle,
final boolean allowBlockingFlush);
- @Override protected final native void disposeInternal(final long handle);
+ private native boolean ingestBehind(final long handle);
+ private native void setIngestBehind(final long handle,
+ final boolean ingestBehind);
+ private native boolean writeGlobalSeqno(final long handle);
+ private native void setWriteGlobalSeqno(final long handle,
+ final boolean writeGlobalSeqNo);
}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * The metadata that describes a level.
+ */
+public class LevelMetaData {
+ private final int level;
+ private final long size;
+ private final SstFileMetaData[] files;
+
+ /**
+ * Called from JNI C++
+ */
+ private LevelMetaData(final int level, final long size,
+ final SstFileMetaData[] files) {
+ this.level = level;
+ this.size = size;
+ this.files = files;
+ }
+
+ /**
+ * The level which this meta data describes.
+ *
+ * @return the level
+ */
+ public int level() {
+ return level;
+ }
+
+ /**
+ * The size of this level in bytes, which is equal to the sum of
+ * the file size of its {@link #files()}.
+ *
+ * @return the size
+ */
+ public long size() {
+ return size;
+ }
+
+ /**
+ * The metadata of all sst files in this level.
+ *
+ * @return the metadata of the files
+ */
+ public List<SstFileMetaData> files() {
+ return Arrays.asList(files);
+ }
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The full set of metadata associated with each SST file.
+ */
+public class LiveFileMetaData extends SstFileMetaData {
+ private final byte[] columnFamilyName;
+ private final int level;
+
+ /**
+ * Called from JNI C++
+ */
+ private LiveFileMetaData(
+ final byte[] columnFamilyName,
+ final int level,
+ final String fileName,
+ final String path,
+ final long size,
+ final long smallestSeqno,
+ final long largestSeqno,
+ final byte[] smallestKey,
+ final byte[] largestKey,
+ final long numReadsSampled,
+ final boolean beingCompacted,
+ final long numEntries,
+ final long numDeletions) {
+ super(fileName, path, size, smallestSeqno, largestSeqno, smallestKey,
+ largestKey, numReadsSampled, beingCompacted, numEntries, numDeletions);
+ this.columnFamilyName = columnFamilyName;
+ this.level = level;
+ }
+
+ /**
+ * Get the name of the column family.
+ *
+ * @return the name of the column family
+ */
+ public byte[] columnFamilyName() {
+ return columnFamilyName;
+ }
+
+ /**
+ * Get the level at which this file resides.
+ *
+ * @return the level at which the file resides.
+ */
+ public int level() {
+ return level;
+ }
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public class LogFile {
+ private final String pathName;
+ private final long logNumber;
+ private final WalFileType type;
+ private final long startSequence;
+ private final long sizeFileBytes;
+
+ /**
+ * Called from JNI C++
+ */
+ private LogFile(final String pathName, final long logNumber,
+ final byte walFileTypeValue, final long startSequence,
+ final long sizeFileBytes) {
+ this.pathName = pathName;
+ this.logNumber = logNumber;
+ this.type = WalFileType.fromValue(walFileTypeValue);
+ this.startSequence = startSequence;
+ this.sizeFileBytes = sizeFileBytes;
+ }
+
+ /**
+ * Returns log file's pathname relative to the main db dir
+ * Eg. For a live-log-file = /000003.log
+ * For an archived-log-file = /archive/000003.log
+ *
+ * @return log file's pathname
+ */
+ public String pathName() {
+ return pathName;
+ }
+
+ /**
+ * Primary identifier for log file.
+ * This is directly proportional to creation time of the log file
+ *
+ * @return the log number
+ */
+ public long logNumber() {
+ return logNumber;
+ }
+
+ /**
+ * Log file can be either alive or archived.
+ *
+ * @return the type of the log file.
+ */
+ public WalFileType type() {
+ return type;
+ }
+
+ /**
+ * Starting sequence number of writebatch written in this log file.
+ *
+   * @return the starting sequence number
+ */
+ public long startSequence() {
+ return startSequence;
+ }
+
+ /**
+ * Size of log file on disk in Bytes.
+ *
+ * @return size of log file
+ */
+ public long sizeFileBytes() {
+ return sizeFileBytes;
+ }
+}
import java.util.*;
-public class MutableColumnFamilyOptions {
- private final static String KEY_VALUE_PAIR_SEPARATOR = ";";
- private final static char KEY_VALUE_SEPARATOR = '=';
- private final static String INT_ARRAY_INT_SEPARATOR = ",";
-
- private final String[] keys;
- private final String[] values;
-
- // user must use builder pattern, or parser
- private MutableColumnFamilyOptions(final String keys[],
- final String values[]) {
- this.keys = keys;
- this.values = values;
- }
-
- String[] getKeys() {
- return keys;
- }
+public class MutableColumnFamilyOptions
+ extends AbstractMutableOptions {
- String[] getValues() {
- return values;
+ /**
+ * User must use builder pattern, or parser.
+ *
+ * @param keys the keys
+ * @param values the values
+ *
+ * See {@link #builder()} and {@link #parse(String)}.
+ */
+ private MutableColumnFamilyOptions(final String[] keys,
+ final String[] values) {
+ super(keys, values);
}
/**
final MutableColumnFamilyOptionsBuilder builder =
new MutableColumnFamilyOptionsBuilder();
- final String options[] = str.trim().split(KEY_VALUE_PAIR_SEPARATOR);
+ final String[] options = str.trim().split(KEY_VALUE_PAIR_SEPARATOR);
for(final String option : options) {
final int equalsOffset = option.indexOf(KEY_VALUE_SEPARATOR);
if(equalsOffset <= 0) {
}
final String key = option.substring(0, equalsOffset);
- if(key == null || key.isEmpty()) {
+ if(key.isEmpty()) {
throw new IllegalArgumentException("options string is invalid");
}
final String value = option.substring(equalsOffset + 1);
- if(value == null || value.isEmpty()) {
+ if(value.isEmpty()) {
throw new IllegalArgumentException("options string is invalid");
}
return builder;
}
- /**
- * Returns a string representation
- * of MutableColumnFamilyOptions which is
- * suitable for consumption by {@link #parse(String)}
- *
- * @return String representation of MutableColumnFamilyOptions
- */
- @Override
- public String toString() {
- final StringBuilder buffer = new StringBuilder();
- for(int i = 0; i < keys.length; i++) {
- buffer
- .append(keys[i])
- .append(KEY_VALUE_SEPARATOR)
- .append(values[i]);
-
- if(i + 1 < keys.length) {
- buffer.append(KEY_VALUE_PAIR_SEPARATOR);
- }
- }
- return buffer.toString();
- }
-
- public enum ValueType {
- DOUBLE,
- LONG,
- INT,
- BOOLEAN,
- INT_ARRAY,
- ENUM
- }
+ private interface MutableColumnFamilyOptionKey extends MutableOptionKey {}
public enum MemtableOption implements MutableColumnFamilyOptionKey {
write_buffer_size(ValueType.LONG),
target_file_size_multiplier(ValueType.INT),
max_bytes_for_level_base(ValueType.LONG),
max_bytes_for_level_multiplier(ValueType.INT),
- max_bytes_for_level_multiplier_additional(ValueType.INT_ARRAY);
+ max_bytes_for_level_multiplier_additional(ValueType.INT_ARRAY),
+ ttl(ValueType.LONG);
private final ValueType valueType;
CompactionOption(final ValueType valueType) {
}
}
- private interface MutableColumnFamilyOptionKey {
- String name();
- ValueType getValueType();
- }
-
- private static abstract class MutableColumnFamilyOptionValue<T> {
- protected final T value;
-
- MutableColumnFamilyOptionValue(final T value) {
- this.value = value;
- }
-
- abstract double asDouble() throws NumberFormatException;
- abstract long asLong() throws NumberFormatException;
- abstract int asInt() throws NumberFormatException;
- abstract boolean asBoolean() throws IllegalStateException;
- abstract int[] asIntArray() throws IllegalStateException;
- abstract String asString();
- abstract T asObject();
- }
-
- private static class MutableColumnFamilyOptionStringValue
- extends MutableColumnFamilyOptionValue<String> {
- MutableColumnFamilyOptionStringValue(final String value) {
- super(value);
- }
-
- @Override
- double asDouble() throws NumberFormatException {
- return Double.parseDouble(value);
- }
-
- @Override
- long asLong() throws NumberFormatException {
- return Long.parseLong(value);
- }
-
- @Override
- int asInt() throws NumberFormatException {
- return Integer.parseInt(value);
- }
-
- @Override
- boolean asBoolean() throws IllegalStateException {
- return Boolean.parseBoolean(value);
- }
-
- @Override
- int[] asIntArray() throws IllegalStateException {
- throw new IllegalStateException("String is not applicable as int[]");
- }
-
- @Override
- String asString() {
- return value;
- }
-
- @Override
- String asObject() {
- return value;
- }
- }
-
- private static class MutableColumnFamilyOptionDoubleValue
- extends MutableColumnFamilyOptionValue<Double> {
- MutableColumnFamilyOptionDoubleValue(final double value) {
- super(value);
- }
-
- @Override
- double asDouble() {
- return value;
- }
-
- @Override
- long asLong() throws NumberFormatException {
- return value.longValue();
- }
-
- @Override
- int asInt() throws NumberFormatException {
- if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
- throw new NumberFormatException(
- "double value lies outside the bounds of int");
- }
- return value.intValue();
- }
-
- @Override
- boolean asBoolean() throws IllegalStateException {
- throw new IllegalStateException(
- "double is not applicable as boolean");
- }
-
- @Override
- int[] asIntArray() throws IllegalStateException {
- if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
- throw new NumberFormatException(
- "double value lies outside the bounds of int");
- }
- return new int[] { value.intValue() };
- }
-
- @Override
- String asString() {
- return Double.toString(value);
- }
-
- @Override
- Double asObject() {
- return value;
- }
- }
-
- private static class MutableColumnFamilyOptionLongValue
- extends MutableColumnFamilyOptionValue<Long> {
- MutableColumnFamilyOptionLongValue(final long value) {
- super(value);
- }
-
- @Override
- double asDouble() {
- if(value > Double.MAX_VALUE || value < Double.MIN_VALUE) {
- throw new NumberFormatException(
- "long value lies outside the bounds of int");
- }
- return value.doubleValue();
- }
-
- @Override
- long asLong() throws NumberFormatException {
- return value;
- }
-
- @Override
- int asInt() throws NumberFormatException {
- if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
- throw new NumberFormatException(
- "long value lies outside the bounds of int");
- }
- return value.intValue();
- }
-
- @Override
- boolean asBoolean() throws IllegalStateException {
- throw new IllegalStateException(
- "long is not applicable as boolean");
- }
-
- @Override
- int[] asIntArray() throws IllegalStateException {
- if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
- throw new NumberFormatException(
- "long value lies outside the bounds of int");
- }
- return new int[] { value.intValue() };
- }
-
- @Override
- String asString() {
- return Long.toString(value);
- }
-
- @Override
- Long asObject() {
- return value;
- }
- }
-
- private static class MutableColumnFamilyOptionIntValue
- extends MutableColumnFamilyOptionValue<Integer> {
- MutableColumnFamilyOptionIntValue(final int value) {
- super(value);
- }
-
- @Override
- double asDouble() {
- if(value > Double.MAX_VALUE || value < Double.MIN_VALUE) {
- throw new NumberFormatException("int value lies outside the bounds of int");
- }
- return value.doubleValue();
- }
-
- @Override
- long asLong() throws NumberFormatException {
- return value;
- }
-
- @Override
- int asInt() throws NumberFormatException {
- return value;
- }
-
- @Override
- boolean asBoolean() throws IllegalStateException {
- throw new IllegalStateException("int is not applicable as boolean");
- }
-
- @Override
- int[] asIntArray() throws IllegalStateException {
- return new int[] { value };
- }
-
- @Override
- String asString() {
- return Integer.toString(value);
- }
-
- @Override
- Integer asObject() {
- return value;
- }
- }
-
- private static class MutableColumnFamilyOptionBooleanValue
- extends MutableColumnFamilyOptionValue<Boolean> {
- MutableColumnFamilyOptionBooleanValue(final boolean value) {
- super(value);
- }
-
- @Override
- double asDouble() {
- throw new NumberFormatException("boolean is not applicable as double");
- }
-
- @Override
- long asLong() throws NumberFormatException {
- throw new NumberFormatException("boolean is not applicable as Long");
- }
-
- @Override
- int asInt() throws NumberFormatException {
- throw new NumberFormatException("boolean is not applicable as int");
- }
-
- @Override
- boolean asBoolean() {
- return value;
- }
-
- @Override
- int[] asIntArray() throws IllegalStateException {
- throw new IllegalStateException("boolean is not applicable as int[]");
- }
-
- @Override
- String asString() {
- return Boolean.toString(value);
- }
-
- @Override
- Boolean asObject() {
- return value;
- }
- }
-
- private static class MutableColumnFamilyOptionIntArrayValue
- extends MutableColumnFamilyOptionValue<int[]> {
- MutableColumnFamilyOptionIntArrayValue(final int[] value) {
- super(value);
- }
-
- @Override
- double asDouble() {
- throw new NumberFormatException("int[] is not applicable as double");
- }
-
- @Override
- long asLong() throws NumberFormatException {
- throw new NumberFormatException("int[] is not applicable as Long");
- }
-
- @Override
- int asInt() throws NumberFormatException {
- throw new NumberFormatException("int[] is not applicable as int");
- }
-
- @Override
- boolean asBoolean() {
- throw new NumberFormatException("int[] is not applicable as boolean");
- }
-
- @Override
- int[] asIntArray() throws IllegalStateException {
- return value;
- }
-
- @Override
- String asString() {
- final StringBuilder builder = new StringBuilder();
- for(int i = 0; i < value.length; i++) {
- builder.append(Integer.toString(i));
- if(i + 1 < value.length) {
- builder.append(INT_ARRAY_INT_SEPARATOR);
- }
- }
- return builder.toString();
- }
-
- @Override
- int[] asObject() {
- return value;
- }
- }
-
- private static class MutableColumnFamilyOptionEnumValue<T extends Enum<T>>
- extends MutableColumnFamilyOptionValue<T> {
-
- MutableColumnFamilyOptionEnumValue(final T value) {
- super(value);
- }
-
- @Override
- double asDouble() throws NumberFormatException {
- throw new NumberFormatException("Enum is not applicable as double");
- }
-
- @Override
- long asLong() throws NumberFormatException {
- throw new NumberFormatException("Enum is not applicable as long");
- }
-
- @Override
- int asInt() throws NumberFormatException {
- throw new NumberFormatException("Enum is not applicable as int");
- }
-
- @Override
- boolean asBoolean() throws IllegalStateException {
- throw new NumberFormatException("Enum is not applicable as boolean");
- }
-
- @Override
- int[] asIntArray() throws IllegalStateException {
- throw new NumberFormatException("Enum is not applicable as int[]");
- }
-
- @Override
- String asString() {
- return value.name();
- }
-
- @Override
- T asObject() {
- return value;
- }
- }
-
public static class MutableColumnFamilyOptionsBuilder
- implements MutableColumnFamilyOptionsInterface {
+ extends AbstractMutableOptionsBuilder<MutableColumnFamilyOptions, MutableColumnFamilyOptionsBuilder, MutableColumnFamilyOptionKey>
+ implements MutableColumnFamilyOptionsInterface<MutableColumnFamilyOptionsBuilder> {
private final static Map<String, MutableColumnFamilyOptionKey> ALL_KEYS_LOOKUP = new HashMap<>();
static {
}
}
- private final Map<MutableColumnFamilyOptionKey, MutableColumnFamilyOptionValue<?>> options = new LinkedHashMap<>();
-
- public MutableColumnFamilyOptions build() {
- final String keys[] = new String[options.size()];
- final String values[] = new String[options.size()];
-
- int i = 0;
- for(final Map.Entry<MutableColumnFamilyOptionKey, MutableColumnFamilyOptionValue<?>> option : options.entrySet()) {
- keys[i] = option.getKey().name();
- values[i] = option.getValue().asString();
- i++;
- }
-
- return new MutableColumnFamilyOptions(keys, values);
- }
-
- private MutableColumnFamilyOptionsBuilder setDouble(
- final MutableColumnFamilyOptionKey key, final double value) {
- if(key.getValueType() != ValueType.DOUBLE) {
- throw new IllegalArgumentException(
- key + " does not accept a double value");
- }
- options.put(key, new MutableColumnFamilyOptionDoubleValue(value));
- return this;
- }
-
- private double getDouble(final MutableColumnFamilyOptionKey key)
- throws NoSuchElementException, NumberFormatException {
- final MutableColumnFamilyOptionValue<?> value = options.get(key);
- if(value == null) {
- throw new NoSuchElementException(key.name() + " has not been set");
- }
- return value.asDouble();
- }
-
- private MutableColumnFamilyOptionsBuilder setLong(
- final MutableColumnFamilyOptionKey key, final long value) {
- if(key.getValueType() != ValueType.LONG) {
- throw new IllegalArgumentException(
- key + " does not accept a long value");
- }
- options.put(key, new MutableColumnFamilyOptionLongValue(value));
- return this;
- }
-
- private long getLong(final MutableColumnFamilyOptionKey key)
- throws NoSuchElementException, NumberFormatException {
- final MutableColumnFamilyOptionValue<?> value = options.get(key);
- if(value == null) {
- throw new NoSuchElementException(key.name() + " has not been set");
- }
- return value.asLong();
- }
-
- private MutableColumnFamilyOptionsBuilder setInt(
- final MutableColumnFamilyOptionKey key, final int value) {
- if(key.getValueType() != ValueType.INT) {
- throw new IllegalArgumentException(
- key + " does not accept an integer value");
- }
- options.put(key, new MutableColumnFamilyOptionIntValue(value));
- return this;
- }
-
- private int getInt(final MutableColumnFamilyOptionKey key)
- throws NoSuchElementException, NumberFormatException {
- final MutableColumnFamilyOptionValue<?> value = options.get(key);
- if(value == null) {
- throw new NoSuchElementException(key.name() + " has not been set");
- }
- return value.asInt();
- }
-
- private MutableColumnFamilyOptionsBuilder setBoolean(
- final MutableColumnFamilyOptionKey key, final boolean value) {
- if(key.getValueType() != ValueType.BOOLEAN) {
- throw new IllegalArgumentException(
- key + " does not accept a boolean value");
- }
- options.put(key, new MutableColumnFamilyOptionBooleanValue(value));
- return this;
- }
-
- private boolean getBoolean(final MutableColumnFamilyOptionKey key)
- throws NoSuchElementException, NumberFormatException {
- final MutableColumnFamilyOptionValue<?> value = options.get(key);
- if(value == null) {
- throw new NoSuchElementException(key.name() + " has not been set");
- }
- return value.asBoolean();
+ private MutableColumnFamilyOptionsBuilder() {
+ super();
}
- private MutableColumnFamilyOptionsBuilder setIntArray(
- final MutableColumnFamilyOptionKey key, final int[] value) {
- if(key.getValueType() != ValueType.INT_ARRAY) {
- throw new IllegalArgumentException(
- key + " does not accept an int array value");
- }
- options.put(key, new MutableColumnFamilyOptionIntArrayValue(value));
- return this;
- }
-
- private int[] getIntArray(final MutableColumnFamilyOptionKey key)
- throws NoSuchElementException, NumberFormatException {
- final MutableColumnFamilyOptionValue<?> value = options.get(key);
- if(value == null) {
- throw new NoSuchElementException(key.name() + " has not been set");
- }
- return value.asIntArray();
- }
-
- private <T extends Enum<T>> MutableColumnFamilyOptionsBuilder setEnum(
- final MutableColumnFamilyOptionKey key, final T value) {
- if(key.getValueType() != ValueType.ENUM) {
- throw new IllegalArgumentException(
- key + " does not accept a Enum value");
- }
- options.put(key, new MutableColumnFamilyOptionEnumValue<T>(value));
+ @Override
+ protected MutableColumnFamilyOptionsBuilder self() {
return this;
-
}
- private <T extends Enum<T>> T getEnum(final MutableColumnFamilyOptionKey key)
- throws NoSuchElementException, NumberFormatException {
- final MutableColumnFamilyOptionValue<?> value = options.get(key);
- if(value == null) {
- throw new NoSuchElementException(key.name() + " has not been set");
- }
-
- if(!(value instanceof MutableColumnFamilyOptionEnumValue)) {
- throw new NoSuchElementException(key.name() + " is not of Enum type");
- }
-
- return ((MutableColumnFamilyOptionEnumValue<T>)value).asObject();
+ @Override
+ protected Map<String, MutableColumnFamilyOptionKey> allKeys() {
+ return ALL_KEYS_LOOKUP;
}
- public MutableColumnFamilyOptionsBuilder fromString(final String keyStr,
- final String valueStr) throws IllegalArgumentException {
- Objects.requireNonNull(keyStr);
- Objects.requireNonNull(valueStr);
-
- final MutableColumnFamilyOptionKey key = ALL_KEYS_LOOKUP.get(keyStr);
- switch(key.getValueType()) {
- case DOUBLE:
- return setDouble(key, Double.parseDouble(valueStr));
-
- case LONG:
- return setLong(key, Long.parseLong(valueStr));
-
- case INT:
- return setInt(key, Integer.parseInt(valueStr));
-
- case BOOLEAN:
- return setBoolean(key, Boolean.parseBoolean(valueStr));
-
- case INT_ARRAY:
- final String[] strInts = valueStr
- .trim().split(INT_ARRAY_INT_SEPARATOR);
- if(strInts == null || strInts.length == 0) {
- throw new IllegalArgumentException(
- "int array value is not correctly formatted");
- }
-
- final int value[] = new int[strInts.length];
- int i = 0;
- for(final String strInt : strInts) {
- value[i++] = Integer.parseInt(strInt);
- }
- return setIntArray(key, value);
- }
-
- throw new IllegalStateException(
- key + " has unknown value type: " + key.getValueType());
+ @Override
+ protected MutableColumnFamilyOptions build(final String[] keys,
+ final String[] values) {
+ return new MutableColumnFamilyOptions(keys, values);
}
@Override
public boolean reportBgIoStats() {
return getBoolean(MiscOption.report_bg_io_stats);
}
+
+ @Override
+ public MutableColumnFamilyOptionsBuilder setTtl(final long ttl) {
+ return setLong(CompactionOption.ttl, ttl);
+ }
+
+ @Override
+ public long ttl() {
+ return getLong(CompactionOption.ttl);
+ }
}
}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+public class MutableDBOptions extends AbstractMutableOptions {
+
+ /**
+ * User must use builder pattern, or parser.
+ *
+ * @param keys the keys
+ * @param values the values
+ *
+ * See {@link #builder()} and {@link #parse(String)}.
+ */
+ private MutableDBOptions(final String[] keys, final String[] values) {
+ super(keys, values);
+ }
+
+ /**
+ * Creates a builder which allows you
+ * to set MutableDBOptions in a fluent
+ * manner
+ *
+ * @return A builder for MutableDBOptions
+ */
+ public static MutableDBOptionsBuilder builder() {
+ return new MutableDBOptionsBuilder();
+ }
+
+ /**
+ * Parses a String representation of MutableDBOptions
+ *
+ * The format is: key1=value1;key2=value2;key3=value3 etc
+ *
+ * For int[] values, each int should be separated by a comma, e.g.
+ *
+ * key1=value1;intArrayKey1=1,2,3
+ *
+ * @param str The string representation of the mutable db options
+ *
+ * @return A builder for the mutable db options
+ */
+ public static MutableDBOptionsBuilder parse(final String str) {
+ Objects.requireNonNull(str);
+
+ final MutableDBOptionsBuilder builder =
+ new MutableDBOptionsBuilder();
+
+ final String[] options = str.trim().split(KEY_VALUE_PAIR_SEPARATOR);
+ for(final String option : options) {
+ final int equalsOffset = option.indexOf(KEY_VALUE_SEPARATOR);
+ if(equalsOffset <= 0) {
+ throw new IllegalArgumentException(
+ "options string has an invalid key=value pair");
+ }
+
+ final String key = option.substring(0, equalsOffset);
+ if(key.isEmpty()) {
+ throw new IllegalArgumentException("options string is invalid");
+ }
+
+ final String value = option.substring(equalsOffset + 1);
+ if(value.isEmpty()) {
+ throw new IllegalArgumentException("options string is invalid");
+ }
+
+ builder.fromString(key, value);
+ }
+
+ return builder;
+ }
+
+ private interface MutableDBOptionKey extends MutableOptionKey {}
+
+ public enum DBOption implements MutableDBOptionKey {
+ max_background_jobs(ValueType.INT),
+ base_background_compactions(ValueType.INT),
+ max_background_compactions(ValueType.INT),
+ avoid_flush_during_shutdown(ValueType.BOOLEAN),
+ writable_file_max_buffer_size(ValueType.LONG),
+ delayed_write_rate(ValueType.LONG),
+ max_total_wal_size(ValueType.LONG),
+ delete_obsolete_files_period_micros(ValueType.LONG),
+ stats_dump_period_sec(ValueType.INT),
+ max_open_files(ValueType.INT),
+ bytes_per_sync(ValueType.LONG),
+ wal_bytes_per_sync(ValueType.LONG),
+ compaction_readahead_size(ValueType.LONG);
+
+ private final ValueType valueType;
+ DBOption(final ValueType valueType) {
+ this.valueType = valueType;
+ }
+
+ @Override
+ public ValueType getValueType() {
+ return valueType;
+ }
+ }
+
+ public static class MutableDBOptionsBuilder
+ extends AbstractMutableOptionsBuilder<MutableDBOptions, MutableDBOptionsBuilder, MutableDBOptionKey>
+ implements MutableDBOptionsInterface<MutableDBOptionsBuilder> {
+
+ private final static Map<String, MutableDBOptionKey> ALL_KEYS_LOOKUP = new HashMap<>();
+ static {
+ for(final MutableDBOptionKey key : DBOption.values()) {
+ ALL_KEYS_LOOKUP.put(key.name(), key);
+ }
+ }
+
+ private MutableDBOptionsBuilder() {
+ super();
+ }
+
+ @Override
+ protected MutableDBOptionsBuilder self() {
+ return this;
+ }
+
+ @Override
+ protected Map<String, MutableDBOptionKey> allKeys() {
+ return ALL_KEYS_LOOKUP;
+ }
+
+ @Override
+ protected MutableDBOptions build(final String[] keys,
+ final String[] values) {
+ return new MutableDBOptions(keys, values);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setMaxBackgroundJobs(
+ final int maxBackgroundJobs) {
+ return setInt(DBOption.max_background_jobs, maxBackgroundJobs);
+ }
+
+ @Override
+ public int maxBackgroundJobs() {
+ return getInt(DBOption.max_background_jobs);
+ }
+
+ @Override
+ public void setBaseBackgroundCompactions(
+ final int baseBackgroundCompactions) {
+ setInt(DBOption.base_background_compactions,
+ baseBackgroundCompactions);
+ }
+
+ @Override
+ public int baseBackgroundCompactions() {
+ return getInt(DBOption.base_background_compactions);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setMaxBackgroundCompactions(
+ final int maxBackgroundCompactions) {
+ return setInt(DBOption.max_background_compactions,
+ maxBackgroundCompactions);
+ }
+
+ @Override
+ public int maxBackgroundCompactions() {
+ return getInt(DBOption.max_background_compactions);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setAvoidFlushDuringShutdown(
+ final boolean avoidFlushDuringShutdown) {
+ return setBoolean(DBOption.avoid_flush_during_shutdown,
+ avoidFlushDuringShutdown);
+ }
+
+ @Override
+ public boolean avoidFlushDuringShutdown() {
+ return getBoolean(DBOption.avoid_flush_during_shutdown);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setWritableFileMaxBufferSize(
+ final long writableFileMaxBufferSize) {
+ return setLong(DBOption.writable_file_max_buffer_size,
+ writableFileMaxBufferSize);
+ }
+
+ @Override
+ public long writableFileMaxBufferSize() {
+ return getLong(DBOption.writable_file_max_buffer_size);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setDelayedWriteRate(
+ final long delayedWriteRate) {
+ return setLong(DBOption.delayed_write_rate,
+ delayedWriteRate);
+ }
+
+ @Override
+ public long delayedWriteRate() {
+ return getLong(DBOption.delayed_write_rate);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setMaxTotalWalSize(
+ final long maxTotalWalSize) {
+ return setLong(DBOption.max_total_wal_size, maxTotalWalSize);
+ }
+
+ @Override
+ public long maxTotalWalSize() {
+ return getLong(DBOption.max_total_wal_size);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setDeleteObsoleteFilesPeriodMicros(
+ final long micros) {
+ return setLong(DBOption.delete_obsolete_files_period_micros, micros);
+ }
+
+ @Override
+ public long deleteObsoleteFilesPeriodMicros() {
+ return getLong(DBOption.delete_obsolete_files_period_micros);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setStatsDumpPeriodSec(
+ final int statsDumpPeriodSec) {
+ return setInt(DBOption.stats_dump_period_sec, statsDumpPeriodSec);
+ }
+
+ @Override
+ public int statsDumpPeriodSec() {
+ return getInt(DBOption.stats_dump_period_sec);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setMaxOpenFiles(final int maxOpenFiles) {
+ return setInt(DBOption.max_open_files, maxOpenFiles);
+ }
+
+ @Override
+ public int maxOpenFiles() {
+ return getInt(DBOption.max_open_files);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setBytesPerSync(final long bytesPerSync) {
+ return setLong(DBOption.bytes_per_sync, bytesPerSync);
+ }
+
+ @Override
+ public long bytesPerSync() {
+ return getLong(DBOption.bytes_per_sync);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setWalBytesPerSync(
+ final long walBytesPerSync) {
+ return setLong(DBOption.wal_bytes_per_sync, walBytesPerSync);
+ }
+
+ @Override
+ public long walBytesPerSync() {
+ return getLong(DBOption.wal_bytes_per_sync);
+ }
+
+ @Override
+ public MutableDBOptionsBuilder setCompactionReadaheadSize(
+ final long compactionReadaheadSize) {
+ return setLong(DBOption.compaction_readahead_size,
+ compactionReadaheadSize);
+ }
+
+ @Override
+ public long compactionReadaheadSize() {
+ return getLong(DBOption.compaction_readahead_size);
+ }
+ }
+}
--- /dev/null
+package org.rocksdb;
+
+public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface> {
+
+ /**
+ * Specifies the maximum number of concurrent background jobs (both flushes
+ * and compactions combined).
+ * Default: 2
+ *
+ * @param maxBackgroundJobs number of max concurrent background jobs
+ * @return the instance of the current object.
+ */
+ T setMaxBackgroundJobs(int maxBackgroundJobs);
+
+ /**
+ * Returns the maximum number of concurrent background jobs (both flushes
+ * and compactions combined).
+ * Default: 2
+ *
+ * @return the maximum number of concurrent background jobs.
+ */
+ int maxBackgroundJobs();
+
+ /**
+ * Suggested number of concurrent background compaction jobs, submitted to
+ * the default LOW priority thread pool.
+ * Default: 1
+ *
+ * @param baseBackgroundCompactions Suggested number of background compaction
+ * jobs
+ *
+ * @deprecated Use {@link #setMaxBackgroundJobs(int)}
+ */
+ @Deprecated
+ void setBaseBackgroundCompactions(int baseBackgroundCompactions);
+
+ /**
+ * Suggested number of concurrent background compaction jobs, submitted to
+ * the default LOW priority thread pool.
+ * Default: 1
+ *
+ * @return Suggested number of background compaction jobs
+ */
+ int baseBackgroundCompactions();
+
+ /**
+ * Specifies the maximum number of concurrent background compaction jobs,
+ * submitted to the default LOW priority thread pool.
+ * If you're increasing this, also consider increasing number of threads in
+ * LOW priority thread pool. For more information, see
+ * Default: 1
+ *
+ * @param maxBackgroundCompactions the maximum number of background
+ * compaction jobs.
+ * @return the instance of the current object.
+ *
+ * @see RocksEnv#setBackgroundThreads(int)
+ * @see RocksEnv#setBackgroundThreads(int, Priority)
+ * @see DBOptionsInterface#maxBackgroundFlushes()
+ */
+ T setMaxBackgroundCompactions(int maxBackgroundCompactions);
+
+ /**
+ * Returns the maximum number of concurrent background compaction jobs,
+ * submitted to the default LOW priority thread pool.
+ * When increasing this number, we may also want to consider increasing
+ * number of threads in LOW priority thread pool.
+ * Default: 1
+ *
+ * @return the maximum number of concurrent background compaction jobs.
+ * @see RocksEnv#setBackgroundThreads(int)
+ * @see RocksEnv#setBackgroundThreads(int, Priority)
+ *
+ * @deprecated Use {@link #setMaxBackgroundJobs(int)}
+ */
+ @Deprecated
+ int maxBackgroundCompactions();
+
+ /**
+ * By default RocksDB will flush all memtables on DB close if there is
+ * unpersisted data (i.e. with WAL disabled). The flush can be skipped to
+ * speed up DB close. Unpersisted data WILL BE LOST.
+ *
+ * DEFAULT: false
+ *
+ * Dynamically changeable through
+ * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
+ * API.
+ *
+ * @param avoidFlushDuringShutdown true if we should avoid flush during
+ * shutdown
+ *
+ * @return the reference to the current options.
+ */
+ T setAvoidFlushDuringShutdown(boolean avoidFlushDuringShutdown);
+
+ /**
+ * By default RocksDB will flush all memtables on DB close if there is
+ * unpersisted data (i.e. with WAL disabled). The flush can be skipped to
+ * speed up DB close. Unpersisted data WILL BE LOST.
+ *
+ * DEFAULT: false
+ *
+ * Dynamically changeable through
+ * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
+ * API.
+ *
+ * @return true if we should avoid flush during shutdown
+ */
+ boolean avoidFlushDuringShutdown();
+
+ /**
+ * This is the maximum buffer size that is used by WritableFileWriter.
+ * On Windows, we need to maintain an aligned buffer for writes.
+ * We allow the buffer to grow until its size hits the limit.
+ *
+ * Default: 1024 * 1024 (1 MB)
+ *
+ * @param writableFileMaxBufferSize the maximum buffer size
+ *
+ * @return the reference to the current options.
+ */
+ T setWritableFileMaxBufferSize(long writableFileMaxBufferSize);
+
+ /**
+ * This is the maximum buffer size that is used by WritableFileWriter.
+ * On Windows, we need to maintain an aligned buffer for writes.
+ * We allow the buffer to grow until its size hits the limit.
+ *
+ * Default: 1024 * 1024 (1 MB)
+ *
+ * @return the maximum buffer size
+ */
+ long writableFileMaxBufferSize();
+
+ /**
+ * The limited write rate to DB if
+ * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or
+ * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered,
+ * or we are writing to the last mem table allowed and we allow more than 3
+ * mem tables. It is calculated using size of user write requests before
+ * compression. RocksDB may decide to slow down more if the compaction still
+ * gets behind further.
+ *
+ * Unit: bytes per second.
+ *
+ * Default: 16MB/s
+ *
+ * @param delayedWriteRate the rate in bytes per second
+ *
+ * @return the reference to the current options.
+ */
+ T setDelayedWriteRate(long delayedWriteRate);
+
+ /**
+ * The limited write rate to DB if
+ * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or
+ * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered,
+ * or we are writing to the last mem table allowed and we allow more than 3
+ * mem tables. It is calculated using size of user write requests before
+ * compression. RocksDB may decide to slow down more if the compaction still
+ * gets behind further.
+ *
+ * Unit: bytes per second.
+ *
+ * Default: 16MB/s
+ *
+ * @return the rate in bytes per second
+ */
+ long delayedWriteRate();
+
+ /**
+ * <p>Once write-ahead logs exceed this size, we will start forcing the
+ * flush of column families whose memtables are backed by the oldest live
+ * WAL file (i.e. the ones that are causing all the space amplification).
+ * </p>
+ * <p>If set to 0 (default), we will dynamically choose the WAL size limit to
+ * be [sum of all write_buffer_size * max_write_buffer_number] * 2</p>
+ * <p>This option takes effect only when there is more than one column family, as
+ * otherwise the wal size is dictated by the write_buffer_size.</p>
+ * <p>Default: 0</p>
+ *
+ * @param maxTotalWalSize max total wal size.
+ * @return the instance of the current object.
+ */
+ T setMaxTotalWalSize(long maxTotalWalSize);
+
+ /**
+ * <p>Returns the max total wal size. Once write-ahead logs exceed this size,
+ * we will start forcing the flush of column families whose memtables are
+ * backed by the oldest live WAL file (i.e. the ones that are causing all
+ * the space amplification).</p>
+ *
+ * <p>If set to 0 (default), we will dynamically choose the WAL size limit
+ * to be [sum of all write_buffer_size * max_write_buffer_number] * 2
+ * </p>
+ *
+ * @return max total wal size
+ */
+ long maxTotalWalSize();
+
+ /**
+ * The periodicity when obsolete files get deleted. The default
+ * value is 6 hours. The files that get out of scope by compaction
+ * process will still get automatically deleted on every compaction,
+ * regardless of this setting.
+ *
+ * @param micros the time interval in micros
+ * @return the instance of the current object.
+ */
+ T setDeleteObsoleteFilesPeriodMicros(long micros);
+
+ /**
+ * The periodicity when obsolete files get deleted. The default
+ * value is 6 hours. The files that get out of scope by compaction
+ * process will still get automatically deleted on every compaction,
+ * regardless of this setting.
+ *
+ * @return the time interval in micros when obsolete files will be deleted.
+ */
+ long deleteObsoleteFilesPeriodMicros();
+
+ /**
+ * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
+ * Default: 600 (10 minutes)
+ *
+ * @param statsDumpPeriodSec time interval in seconds.
+ * @return the instance of the current object.
+ */
+ T setStatsDumpPeriodSec(int statsDumpPeriodSec);
+
+ /**
+ * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
+ * Default: 600 (10 minutes)
+ *
+ * @return time interval in seconds.
+ */
+ int statsDumpPeriodSec();
+
+ /**
+ * Number of open files that can be used by the DB. You may need to
+ * increase this if your database has a large working set. Value -1 means
+ * files opened are always kept open. You can estimate number of files based
+ * on {@code target_file_size_base} and {@code target_file_size_multiplier}
+ * for level-based compaction. For universal-style compaction, you can usually
+ * set it to -1.
+ * Default: 5000
+ *
+ * @param maxOpenFiles the maximum number of open files.
+ * @return the instance of the current object.
+ */
+ T setMaxOpenFiles(int maxOpenFiles);
+
+ /**
+ * Number of open files that can be used by the DB. You may need to
+ * increase this if your database has a large working set. Value -1 means
+ * files opened are always kept open. You can estimate number of files based
+ * on {@code target_file_size_base} and {@code target_file_size_multiplier}
+ * for level-based compaction. For universal-style compaction, you can usually
+ * set it to -1.
+ *
+ * @return the maximum number of open files.
+ */
+ int maxOpenFiles();
+
+ /**
+ * Allows OS to incrementally sync files to disk while they are being
+ * written, asynchronously, in the background.
+ * Issue one request for every bytes_per_sync written. 0 turns it off.
+ * Default: 0
+ *
+ * @param bytesPerSync size in bytes
+ * @return the instance of the current object.
+ */
+ T setBytesPerSync(long bytesPerSync);
+
+ /**
+ * Allows OS to incrementally sync files to disk while they are being
+ * written, asynchronously, in the background.
+ * Issue one request for every bytes_per_sync written. 0 turns it off.
+ * Default: 0
+ *
+ * @return size in bytes
+ */
+ long bytesPerSync();
+
+ /**
+ * Same as {@link #setBytesPerSync(long)} , but applies to WAL files
+ *
+ * Default: 0, turned off
+ *
+ * @param walBytesPerSync size in bytes
+ * @return the instance of the current object.
+ */
+ T setWalBytesPerSync(long walBytesPerSync);
+
+ /**
+ * Same as {@link #bytesPerSync()} , but applies to WAL files
+ *
+ * Default: 0, turned off
+ *
+ * @return size in bytes
+ */
+ long walBytesPerSync();
+
+
+ /**
+ * If non-zero, we perform bigger reads when doing compaction. If you're
+ * running RocksDB on spinning disks, you should set this to at least 2MB.
+ *
+ * That way RocksDB's compaction is doing sequential instead of random reads.
+ * When non-zero, we also force
+ * {@link DBOptionsInterface#newTableReaderForCompactionInputs()} to true.
+ *
+ * Default: 0
+ *
+ * @param compactionReadaheadSize The compaction read-ahead size
+ *
+ * @return the reference to the current options.
+ */
+ T setCompactionReadaheadSize(final long compactionReadaheadSize);
+
+ /**
+ * If non-zero, we perform bigger reads when doing compaction. If you're
+ * running RocksDB on spinning disks, you should set this to at least 2MB.
+ *
+ * That way RocksDB's compaction is doing sequential instead of random reads.
+ * When non-zero, we also force
+ * {@link DBOptionsInterface#newTableReaderForCompactionInputs()} to true.
+ *
+ * Default: 0
+ *
+ * @return The compaction read-ahead size
+ */
+ long compactionReadaheadSize();
+}
--- /dev/null
+package org.rocksdb;
+
+/**
+ * A key for a mutable option, paired with the type of its value.
+ */
+public interface MutableOptionKey {
+ /**
+ * The set of value types a mutable option value may have.
+ */
+ enum ValueType {
+ DOUBLE,
+ LONG,
+ INT,
+ BOOLEAN,
+ INT_ARRAY,
+ ENUM
+ }
+
+ /**
+ * The name of this option key.
+ *
+ * @return the option name.
+ */
+ String name();
+ /**
+ * The type of the value associated with this key.
+ *
+ * @return the value type.
+ */
+ ValueType getValueType();
+}
--- /dev/null
+package org.rocksdb;
+
+import static org.rocksdb.AbstractMutableOptions.INT_ARRAY_INT_SEPARATOR;
+
+public abstract class MutableOptionValue<T> {
+
+ abstract double asDouble() throws NumberFormatException;
+ abstract long asLong() throws NumberFormatException;
+ abstract int asInt() throws NumberFormatException;
+ abstract boolean asBoolean() throws IllegalStateException;
+ abstract int[] asIntArray() throws IllegalStateException;
+ abstract String asString();
+ abstract T asObject();
+
+ private static abstract class MutableOptionValueObject<T>
+ extends MutableOptionValue<T> {
+ protected final T value;
+
+ private MutableOptionValueObject(final T value) {
+ this.value = value;
+ }
+
+ @Override T asObject() {
+ return value;
+ }
+ }
+
+ static MutableOptionValue<String> fromString(final String s) {
+ return new MutableOptionStringValue(s);
+ }
+
+ static MutableOptionValue<Double> fromDouble(final double d) {
+ return new MutableOptionDoubleValue(d);
+ }
+
+ static MutableOptionValue<Long> fromLong(final long d) {
+ return new MutableOptionLongValue(d);
+ }
+
+ static MutableOptionValue<Integer> fromInt(final int i) {
+ return new MutableOptionIntValue(i);
+ }
+
+ static MutableOptionValue<Boolean> fromBoolean(final boolean b) {
+ return new MutableOptionBooleanValue(b);
+ }
+
+ static MutableOptionValue<int[]> fromIntArray(final int[] ix) {
+ return new MutableOptionIntArrayValue(ix);
+ }
+
+ static <N extends Enum<N>> MutableOptionValue<N> fromEnum(final N value) {
+ return new MutableOptionEnumValue<>(value);
+ }
+
+ static class MutableOptionStringValue
+ extends MutableOptionValueObject<String> {
+ MutableOptionStringValue(final String value) {
+ super(value);
+ }
+
+ @Override
+ double asDouble() throws NumberFormatException {
+ return Double.parseDouble(value);
+ }
+
+ @Override
+ long asLong() throws NumberFormatException {
+ return Long.parseLong(value);
+ }
+
+ @Override
+ int asInt() throws NumberFormatException {
+ return Integer.parseInt(value);
+ }
+
+ @Override
+ boolean asBoolean() throws IllegalStateException {
+ return Boolean.parseBoolean(value);
+ }
+
+ @Override
+ int[] asIntArray() throws IllegalStateException {
+ throw new IllegalStateException("String is not applicable as int[]");
+ }
+
+ @Override
+ String asString() {
+ return value;
+ }
+ }
+
+ static class MutableOptionDoubleValue
+ extends MutableOptionValue<Double> {
+ private final double value;
+ MutableOptionDoubleValue(final double value) {
+ this.value = value;
+ }
+
+ @Override
+ double asDouble() {
+ return value;
+ }
+
+ @Override
+ long asLong() throws NumberFormatException {
+ return Double.valueOf(value).longValue();
+ }
+
+ @Override
+ int asInt() throws NumberFormatException {
+ if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
+ throw new NumberFormatException(
+ "double value lies outside the bounds of int");
+ }
+ return Double.valueOf(value).intValue();
+ }
+
+ @Override
+ boolean asBoolean() throws IllegalStateException {
+ throw new IllegalStateException(
+ "double is not applicable as boolean");
+ }
+
+ @Override
+ int[] asIntArray() throws IllegalStateException {
+ if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
+ throw new NumberFormatException(
+ "double value lies outside the bounds of int");
+ }
+ return new int[] { Double.valueOf(value).intValue() };
+ }
+
+ @Override
+ String asString() {
+ return String.valueOf(value);
+ }
+
+ @Override
+ Double asObject() {
+ return value;
+ }
+ }
+
+ static class MutableOptionLongValue
+ extends MutableOptionValue<Long> {
+ private final long value;
+
+ MutableOptionLongValue(final long value) {
+ this.value = value;
+ }
+
+ @Override
+ double asDouble() {
+ if(value > Double.MAX_VALUE || value < Double.MIN_VALUE) {
+ throw new NumberFormatException(
+ "long value lies outside the bounds of int");
+ }
+ return Long.valueOf(value).doubleValue();
+ }
+
+ @Override
+ long asLong() throws NumberFormatException {
+ return value;
+ }
+
+ @Override
+ int asInt() throws NumberFormatException {
+ if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
+ throw new NumberFormatException(
+ "long value lies outside the bounds of int");
+ }
+ return Long.valueOf(value).intValue();
+ }
+
+ @Override
+ boolean asBoolean() throws IllegalStateException {
+ throw new IllegalStateException(
+ "long is not applicable as boolean");
+ }
+
+ @Override
+ int[] asIntArray() throws IllegalStateException {
+ if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) {
+ throw new NumberFormatException(
+ "long value lies outside the bounds of int");
+ }
+ return new int[] { Long.valueOf(value).intValue() };
+ }
+
+ @Override
+ String asString() {
+ return String.valueOf(value);
+ }
+
+ @Override
+ Long asObject() {
+ return value;
+ }
+ }
+
+ static class MutableOptionIntValue
+ extends MutableOptionValue<Integer> {
+ private final int value;
+
+ MutableOptionIntValue(final int value) {
+ this.value = value;
+ }
+
+ @Override
+ double asDouble() {
+ if(value > Double.MAX_VALUE || value < Double.MIN_VALUE) {
+ throw new NumberFormatException("int value lies outside the bounds of int");
+ }
+ return Integer.valueOf(value).doubleValue();
+ }
+
+ @Override
+ long asLong() throws NumberFormatException {
+ return value;
+ }
+
+ @Override
+ int asInt() throws NumberFormatException {
+ return value;
+ }
+
+ @Override
+ boolean asBoolean() throws IllegalStateException {
+ throw new IllegalStateException("int is not applicable as boolean");
+ }
+
+ @Override
+ int[] asIntArray() throws IllegalStateException {
+ return new int[] { value };
+ }
+
+ @Override
+ String asString() {
+ return String.valueOf(value);
+ }
+
+ @Override
+ Integer asObject() {
+ return value;
+ }
+ }
+
+ static class MutableOptionBooleanValue
+ extends MutableOptionValue<Boolean> {
+ private final boolean value;
+
+ MutableOptionBooleanValue(final boolean value) {
+ this.value = value;
+ }
+
+ @Override
+ double asDouble() {
+ throw new NumberFormatException("boolean is not applicable as double");
+ }
+
+ @Override
+ long asLong() throws NumberFormatException {
+ throw new NumberFormatException("boolean is not applicable as Long");
+ }
+
+ @Override
+ int asInt() throws NumberFormatException {
+ throw new NumberFormatException("boolean is not applicable as int");
+ }
+
+ @Override
+ boolean asBoolean() {
+ return value;
+ }
+
+ @Override
+ int[] asIntArray() throws IllegalStateException {
+ throw new IllegalStateException("boolean is not applicable as int[]");
+ }
+
+ @Override
+ String asString() {
+ return String.valueOf(value);
+ }
+
+ @Override
+ Boolean asObject() {
+ return value;
+ }
+ }
+
+ static class MutableOptionIntArrayValue
+ extends MutableOptionValueObject<int[]> {
+ MutableOptionIntArrayValue(final int[] value) {
+ super(value);
+ }
+
+ @Override
+ double asDouble() {
+ throw new NumberFormatException("int[] is not applicable as double");
+ }
+
+ @Override
+ long asLong() throws NumberFormatException {
+ throw new NumberFormatException("int[] is not applicable as Long");
+ }
+
+ @Override
+ int asInt() throws NumberFormatException {
+ throw new NumberFormatException("int[] is not applicable as int");
+ }
+
+ @Override
+ boolean asBoolean() {
+ throw new NumberFormatException("int[] is not applicable as boolean");
+ }
+
+ @Override
+ int[] asIntArray() throws IllegalStateException {
+ return value;
+ }
+
+ @Override
+ String asString() {
+ final StringBuilder builder = new StringBuilder();
+ for(int i = 0; i < value.length; i++) {
+ builder.append(i);
+ if(i + 1 < value.length) {
+ builder.append(INT_ARRAY_INT_SEPARATOR);
+ }
+ }
+ return builder.toString();
+ }
+ }
+
+ static class MutableOptionEnumValue<T extends Enum<T>>
+ extends MutableOptionValueObject<T> {
+
+ MutableOptionEnumValue(final T value) {
+ super(value);
+ }
+
+ @Override
+ double asDouble() throws NumberFormatException {
+ throw new NumberFormatException("Enum is not applicable as double");
+ }
+
+ @Override
+ long asLong() throws NumberFormatException {
+ throw new NumberFormatException("Enum is not applicable as long");
+ }
+
+ @Override
+ int asInt() throws NumberFormatException {
+ throw new NumberFormatException("Enum is not applicable as int");
+ }
+
+ @Override
+ boolean asBoolean() throws IllegalStateException {
+ throw new NumberFormatException("Enum is not applicable as boolean");
+ }
+
+ @Override
+ int[] asIntArray() throws IllegalStateException {
+ throw new NumberFormatException("Enum is not applicable as int[]");
+ }
+
+ @Override
+ String asString() {
+ return value.name();
+ }
+ }
+
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The operation stage.
+ */
+public enum OperationStage {
+ STAGE_UNKNOWN((byte)0x0),
+ STAGE_FLUSH_RUN((byte)0x1),
+ STAGE_FLUSH_WRITE_L0((byte)0x2),
+ STAGE_COMPACTION_PREPARE((byte)0x3),
+ STAGE_COMPACTION_RUN((byte)0x4),
+ STAGE_COMPACTION_PROCESS_KV((byte)0x5),
+ STAGE_COMPACTION_INSTALL((byte)0x6),
+ STAGE_COMPACTION_SYNC_FILE((byte)0x7),
+ STAGE_PICK_MEMTABLES_TO_FLUSH((byte)0x8),
+ STAGE_MEMTABLE_ROLLBACK((byte)0x9),
+ STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS((byte)0xA);
+
+ private final byte value;
+
+ OperationStage(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Returns the byte encoding of this stage.
+ *
+ * @return the internal representation value.
+ */
+ byte getValue() {
+ return value;
+ }
+
+ /**
+ * Looks up the {@link OperationStage} matching a byte encoding.
+ *
+ * @param value the internal representation value.
+ *
+ * @return the operation stage
+ *
+ * @throws IllegalArgumentException if no stage matches the value
+ */
+ static OperationStage fromValue(final byte value)
+ throws IllegalArgumentException {
+ for (final OperationStage stage : OperationStage.values()) {
+ if (stage.value == value) {
+ return stage;
+ }
+ }
+ throw new IllegalArgumentException(
+ "Unknown value for OperationStage: " + value);
+ }
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The type used to refer to a thread operation.
+ *
+ * A thread operation describes high-level action of a thread,
+ * examples include compaction and flush.
+ */
+public enum OperationType {
+ OP_UNKNOWN((byte)0x0),
+ OP_COMPACTION((byte)0x1),
+ OP_FLUSH((byte)0x2);
+
+ private final byte value;
+
+ OperationType(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Returns the byte encoding of this operation type.
+ *
+ * @return the internal representation value.
+ */
+ byte getValue() {
+ return value;
+ }
+
+ /**
+ * Looks up the {@link OperationType} matching a byte encoding.
+ *
+ * @param value the internal representation value.
+ *
+ * @return the operation type
+ *
+ * @throws IllegalArgumentException if no operation type matches the value
+ */
+ static OperationType fromValue(final byte value)
+ throws IllegalArgumentException {
+ for (final OperationType operationType : OperationType.values()) {
+ if (operationType.value == value) {
+ return operationType;
+ }
+ }
+ throw new IllegalArgumentException(
+ "Unknown value for OperationType: " + value);
+ }
+}
return otdb;
}
+
+ /**
+ * This is similar to {@link #close()} except that it
+ * throws an exception if any error occurs.
+ *
+ * This will not fsync the WAL files.
+ * If syncing is required, the caller must first call {@link #syncWal()}
+ * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
+ * with {@link WriteOptions#setSync(boolean)} set to true.
+ *
+ * See also {@link #close()}.
+ *
+ * @throws RocksDBException if an error occurs whilst closing.
+ */
+ public void closeE() throws RocksDBException {
+ // compareAndSet ensures only the first caller performs the close,
+ // making repeated close attempts safe no-ops.
+ if (owningHandle_.compareAndSet(true, false)) {
+ try {
+ closeDatabase(nativeHandle_);
+ } finally {
+ // Release the native resources even if closeDatabase threw.
+ disposeInternal();
+ }
+ }
+ }
+
+ /**
+ * This is similar to {@link #closeE()} except that it
+ * silently ignores any errors.
+ *
+ * This will not fsync the WAL files.
+ * If syncing is required, the caller must first call {@link #syncWal()}
+ * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
+ * with {@link WriteOptions#setSync(boolean)} set to true.
+ *
+ * See also {@link #close()}.
+ */
+ @Override
+ public void close() {
+ // compareAndSet ensures only the first caller performs the close,
+ // making repeated close attempts safe no-ops.
+ if (owningHandle_.compareAndSet(true, false)) {
+ try {
+ closeDatabase(nativeHandle_);
+ } catch (final RocksDBException e) {
+ // silently ignore the error report
+ } finally {
+ // Release the native resources regardless of the outcome.
+ disposeInternal();
+ }
+ }
+ }
+
@Override
public Transaction beginTransaction(final WriteOptions writeOptions) {
return new Transaction(this, beginTransaction(nativeHandle_,
return db;
}
+ @Override protected final native void disposeInternal(final long handle);
+
protected static native long open(final long optionsHandle,
final String path) throws RocksDBException;
protected static native long[] open(final long handle, final String path,
final byte[][] columnFamilyNames, final long[] columnFamilyOptions);
+ private native static void closeDatabase(final long handle)
+ throws RocksDBException;
private native long beginTransaction(final long handle,
final long writeOptionsHandle);
private native long beginTransaction(final long handle,
final long optimisticTransactionOptionsHandle,
final long oldTransactionHandle);
private native long getBaseDB(final long handle);
- @Override protected final native void disposeInternal(final long handle);
}
* automatically and native resources will be released as part of the process.
*/
public class Options extends RocksObject
- implements DBOptionsInterface<Options>, ColumnFamilyOptionsInterface<Options>,
+ implements DBOptionsInterface<Options>,
+ MutableDBOptionsInterface<Options>,
+ ColumnFamilyOptionsInterface<Options>,
MutableColumnFamilyOptionsInterface<Options> {
static {
RocksDB.loadLibrary();
}
@Override
- public void setMaxSubcompactions(final int maxSubcompactions) {
+ public Options setMaxSubcompactions(final int maxSubcompactions) {
assert(isOwningHandle());
setMaxSubcompactions(nativeHandle_, maxSubcompactions);
+ return this;
}
@Override
return delayedWriteRate(nativeHandle_);
}
+ @Override
+ public Options setEnablePipelinedWrite(final boolean enablePipelinedWrite) {
+ setEnablePipelinedWrite(nativeHandle_, enablePipelinedWrite);
+ return this;
+ }
+
+ @Override
+ public boolean enablePipelinedWrite() {
+ return enablePipelinedWrite(nativeHandle_);
+ }
+
@Override
public Options setAllowConcurrentMemtableWrite(
final boolean allowConcurrentMemtableWrite) {
return this.rowCache_;
}
+ @Override
+ public Options setWalFilter(final AbstractWalFilter walFilter) {
+ assert(isOwningHandle());
+ setWalFilter(nativeHandle_, walFilter.nativeHandle_);
+ this.walFilter_ = walFilter;
+ return this;
+ }
+
+ @Override
+ public WalFilter walFilter() {
+ assert(isOwningHandle());
+ return this.walFilter_;
+ }
+
@Override
public Options setFailIfOptionsFileError(final boolean failIfOptionsFileError) {
assert(isOwningHandle());
return avoidFlushDuringShutdown(nativeHandle_);
}
+ @Override
+ public Options setAllowIngestBehind(final boolean allowIngestBehind) {
+ assert(isOwningHandle());
+ setAllowIngestBehind(nativeHandle_, allowIngestBehind);
+ return this;
+ }
+
+ @Override
+ public boolean allowIngestBehind() {
+ assert(isOwningHandle());
+ return allowIngestBehind(nativeHandle_);
+ }
+
+ @Override
+ public Options setPreserveDeletes(final boolean preserveDeletes) {
+ assert(isOwningHandle());
+ setPreserveDeletes(nativeHandle_, preserveDeletes);
+ return this;
+ }
+
+ @Override
+ public boolean preserveDeletes() {
+ assert(isOwningHandle());
+ return preserveDeletes(nativeHandle_);
+ }
+
+ @Override
+ public Options setTwoWriteQueues(final boolean twoWriteQueues) {
+ assert(isOwningHandle());
+ setTwoWriteQueues(nativeHandle_, twoWriteQueues);
+ return this;
+ }
+
+ @Override
+ public boolean twoWriteQueues() {
+ assert(isOwningHandle());
+ return twoWriteQueues(nativeHandle_);
+ }
+
+ @Override
+ public Options setManualWalFlush(final boolean manualWalFlush) {
+ assert(isOwningHandle());
+ setManualWalFlush(nativeHandle_, manualWalFlush);
+ return this;
+ }
+
+ @Override
+ public boolean manualWalFlush() {
+ assert(isOwningHandle());
+ return manualWalFlush(nativeHandle_);
+ }
+
@Override
public MemTableConfig memTableConfig() {
return this.memTableConfig_;
bottommostCompressionType(nativeHandle_));
}
+ @Override
+ public Options setBottommostCompressionOptions(
+ final CompressionOptions bottommostCompressionOptions) {
+ setBottommostCompressionOptions(nativeHandle_,
+ bottommostCompressionOptions.nativeHandle_);
+ this.bottommostCompressionOptions_ = bottommostCompressionOptions;
+ return this;
+ }
+
+ @Override
+ public CompressionOptions bottommostCompressionOptions() {
+ return this.bottommostCompressionOptions_;
+ }
+
@Override
public Options setCompressionOptions(
final CompressionOptions compressionOptions) {
@Override
public CompactionStyle compactionStyle() {
- return CompactionStyle.values()[compactionStyle(nativeHandle_)];
+ return CompactionStyle.fromValue(compactionStyle(nativeHandle_));
}
@Override
return reportBgIoStats(nativeHandle_);
}
+ @Override
+ public Options setTtl(final long ttl) {
+ setTtl(nativeHandle_, ttl);
+ return this;
+ }
+
+ @Override
+ public long ttl() {
+ return ttl(nativeHandle_);
+ }
+
@Override
public Options setCompactionOptionsUniversal(
final CompactionOptionsUniversal compactionOptionsUniversal) {
return forceConsistencyChecks(nativeHandle_);
}
+ @Override
+ public Options setAtomicFlush(final boolean atomicFlush) {
+ setAtomicFlush(nativeHandle_, atomicFlush);
+ return this;
+ }
+
+ @Override
+ public boolean atomicFlush() {
+ return atomicFlush(nativeHandle_);
+ }
+
private native static long newOptions();
private native static long newOptions(long dbOptHandle,
long cfOptHandle);
private native boolean enableThreadTracking(long handle);
private native void setDelayedWriteRate(long handle, long delayedWriteRate);
private native long delayedWriteRate(long handle);
+ private native void setEnablePipelinedWrite(final long handle,
+ final boolean pipelinedWrite);
+ private native boolean enablePipelinedWrite(final long handle);
private native void setAllowConcurrentMemtableWrite(long handle,
boolean allowConcurrentMemtableWrite);
private native boolean allowConcurrentMemtableWrite(long handle);
final boolean allow2pc);
private native boolean allow2pc(final long handle);
private native void setRowCache(final long handle,
- final long row_cache_handle);
+ final long rowCacheHandle);
+ private native void setWalFilter(final long handle,
+ final long walFilterHandle);
private native void setFailIfOptionsFileError(final long handle,
final boolean failIfOptionsFileError);
private native boolean failIfOptionsFileError(final long handle);
private native void setAvoidFlushDuringShutdown(final long handle,
final boolean avoidFlushDuringShutdown);
private native boolean avoidFlushDuringShutdown(final long handle);
+ private native void setAllowIngestBehind(final long handle,
+ final boolean allowIngestBehind);
+ private native boolean allowIngestBehind(final long handle);
+ private native void setPreserveDeletes(final long handle,
+ final boolean preserveDeletes);
+ private native boolean preserveDeletes(final long handle);
+ private native void setTwoWriteQueues(final long handle,
+ final boolean twoWriteQueues);
+ private native boolean twoWriteQueues(final long handle);
+ private native void setManualWalFlush(final long handle,
+ final boolean manualWalFlush);
+ private native boolean manualWalFlush(final long handle);
+
// CF native handles
private native void optimizeForSmallDb(final long handle);
private native void setBottommostCompressionType(long handle,
byte bottommostCompressionType);
private native byte bottommostCompressionType(long handle);
+ private native void setBottommostCompressionOptions(final long handle,
+ final long bottommostCompressionOptionsHandle);
private native void setCompressionOptions(long handle,
long compressionOptionsHandle);
private native void useFixedLengthPrefixExtractor(
private native void setReportBgIoStats(final long handle,
final boolean reportBgIoStats);
private native boolean reportBgIoStats(final long handle);
+ private native void setTtl(final long handle, final long ttl);
+ private native long ttl(final long handle);
private native void setCompactionOptionsUniversal(final long handle,
final long compactionOptionsUniversalHandle);
private native void setCompactionOptionsFIFO(final long handle,
private native void setForceConsistencyChecks(final long handle,
final boolean forceConsistencyChecks);
private native boolean forceConsistencyChecks(final long handle);
+ private native void setAtomicFlush(final long handle,
+ final boolean atomicFlush);
+ private native boolean atomicFlush(final long handle);
// instance variables
// NOTE: If you add new member variables, please update the copy constructor above!
compactionFilterFactory_;
private CompactionOptionsUniversal compactionOptionsUniversal_;
private CompactionOptionsFIFO compactionOptionsFIFO_;
+ private CompressionOptions bottommostCompressionOptions_;
private CompressionOptions compressionOptions_;
private Cache rowCache_;
+ private WalFilter walFilter_;
private WriteBufferManager writeBufferManager_;
}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Persistent cache for caching IO pages on a persistent medium. The
+ * cache is specifically designed for persistent read cache.
+ */
+public class PersistentCache extends RocksObject {
+
+ /**
+ * Constructs a new persistent cache backed by the given path.
+ *
+ * @param env the environment to use for file operations.
+ * @param path the path of the cache on the persistent medium.
+ * @param size the size of the cache, in bytes.
+ * @param logger the logger to use for cache diagnostics.
+ * @param optimizedForNvm true if the cache should be optimized for
+ * NVM (non-volatile memory) devices.
+ *
+ * @throws RocksDBException if the native cache could not be created.
+ */
+ public PersistentCache(final Env env, final String path, final long size,
+ final Logger logger, final boolean optimizedForNvm)
+ throws RocksDBException {
+ super(newPersistentCache(env.nativeHandle_, path, size,
+ logger.nativeHandle_, optimizedForNvm));
+ }
+
+ private native static long newPersistentCache(final long envHandle,
+ final String path, final long size, final long loggerHandle,
+ final boolean optimizedForNvm) throws RocksDBException;
+
+ @Override protected final native void disposeInternal(final long handle);
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The Thread Pool priority.
+ */
+public enum Priority {
+ BOTTOM((byte) 0x0),
+ LOW((byte) 0x1),
+ HIGH((byte)0x2),
+ TOTAL((byte)0x3);
+
+ private final byte value;
+
+ Priority(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * <p>Returns the byte encoding of this priority.</p>
+ *
+ * @return byte representation
+ */
+ byte getValue() {
+ return value;
+ }
+
+ /**
+ * Looks up the {@link org.rocksdb.Priority} matching a byte encoding.
+ *
+ * @param value byte representation of Priority.
+ *
+ * @return {@link org.rocksdb.Priority} instance.
+ * @throws java.lang.IllegalArgumentException if an invalid
+ * value is provided.
+ */
+ static Priority getPriority(final byte value) {
+ for (final Priority candidate : Priority.values()) {
+ if (candidate.value == value) {
+ return candidate;
+ }
+ }
+ throw new IllegalArgumentException("Illegal value provided for Priority.");
+ }
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Range from start to limit.
+ */
+public class Range {
+ // the start key of the range
+ final Slice start;
+ // the limit key of the range
+ final Slice limit;
+
+ /**
+ * Constructs a range from {@code start} to {@code limit}.
+ *
+ * @param start the start key of the range.
+ * @param limit the limit key of the range.
+ */
+ public Range(final Slice start, final Slice limit) {
+ this.start = start;
+ this.limit = limit;
+ }
+}
super(newReadOptions());
}
+ /**
+ * @param verifyChecksums verification will be performed on every read
+ * when set to true
+ * @param fillCache if true, then fill-cache behavior will be performed.
+ */
+ public ReadOptions(final boolean verifyChecksums, final boolean fillCache) {
+ super(newReadOptions(verifyChecksums, fillCache));
+ }
+
/**
* Copy constructor.
*
*/
public ReadOptions(ReadOptions other) {
super(copyReadOptions(other.nativeHandle_));
- iterateUpperBoundSlice_ = other.iterateUpperBoundSlice_;
- iterateLowerBoundSlice_ = other.iterateLowerBoundSlice_;
+ this.iterateLowerBoundSlice_ = other.iterateLowerBoundSlice_;
+ this.iterateUpperBoundSlice_ = other.iterateUpperBoundSlice_;
}
/**
/**
* Returns whether managed iterators will be used.
*
- * @return the setting of whether managed iterators will be used, by default false
+ * @return the setting of whether managed iterators will be used,
+ * by default false
+ *
+ * @deprecated This option is not used anymore.
*/
+ @Deprecated
public boolean managed() {
assert(isOwningHandle());
return managed(nativeHandle_);
*
* @param managed if true, then managed iterators will be enabled.
* @return the reference to the current ReadOptions.
+ *
+ * @deprecated This option is not used anymore.
*/
+ @Deprecated
public ReadOptions setManaged(final boolean managed) {
assert(isOwningHandle());
setManaged(nativeHandle_, managed);
return prefixSameAsStart(nativeHandle_);
}
-
/**
* Enforce that the iterator only iterates over the same prefix as the seek.
* This option is effective only for prefix seeks, i.e. prefix_extractor is
return this;
}
+ /**
+ * A threshold for the number of keys that can be skipped before failing an
+ * iterator seek as incomplete.
+ *
+ * @return the number of keys that can be skipped
+ * before failing an iterator seek as incomplete.
+ */
+ public long maxSkippableInternalKeys() {
+ assert(isOwningHandle());
+ return maxSkippableInternalKeys(nativeHandle_);
+ }
+
+ /**
+ * A threshold for the number of keys that can be skipped before failing an
+ * iterator seek as incomplete. The default value of 0 should be used to
+ * never fail a request as incomplete, even on skipping too many keys.
+ *
+ * Default: 0
+ *
+ * @param maxSkippableInternalKeys the number of keys that can be skipped
+ * before failing an iterator seek as incomplete.
+ *
+ * @return the reference to the current ReadOptions.
+ */
+ public ReadOptions setMaxSkippableInternalKeys(
+ final long maxSkippableInternalKeys) {
+ assert(isOwningHandle());
+ setMaxSkippableInternalKeys(nativeHandle_, maxSkippableInternalKeys);
+ return this;
+ }
+
/**
* If true, keys deleted using the DeleteRange() API will be visible to
* readers until they are naturally deleted during compaction. This improves
}
/**
- * Defines the extent upto which the forward iterator can returns entries.
- * Once the bound is reached, Valid() will be false. iterate_upper_bound
- * is exclusive ie the bound value is not a valid entry. If
- * iterator_extractor is not null, the Seek target and iterator_upper_bound
+ * Defines the smallest key at which the backward
+ * iterator can return an entry. Once the bound is passed,
+ * {@link RocksIterator#isValid()} will be false.
+ *
+ * The lower bound is inclusive i.e. the bound value is a valid
+ * entry.
+ *
+ * If prefix_extractor is not null, the Seek target and `iterate_lower_bound`
* need to have the same prefix. This is because ordering is not guaranteed
- * outside of prefix domain. There is no lower bound on the iterator.
+ * outside of prefix domain.
*
- * Default: nullptr
+ * Default: null
+ *
+ * @param iterateLowerBound Slice representing the lower bound
+ * @return the reference to the current ReadOptions.
+ */
+ public ReadOptions setIterateLowerBound(final Slice iterateLowerBound) {
+ assert(isOwningHandle());
+ if (iterateLowerBound != null) {
+ // Hold onto a reference so it doesn't get garbage collected out from under us.
+ iterateLowerBoundSlice_ = iterateLowerBound;
+ setIterateLowerBound(nativeHandle_, iterateLowerBoundSlice_.getNativeHandle());
+ }
+ return this;
+ }
+
+ /**
+ * Returns the smallest key at which the backward
+ * iterator can return an entry.
+ *
+ * The lower bound is inclusive i.e. the bound value is a valid entry.
+ *
+ * @return the smallest key, or null if there is no lower bound defined.
+ */
+ public Slice iterateLowerBound() {
+ assert(isOwningHandle());
+ final long lowerBoundSliceHandle = iterateLowerBound(nativeHandle_);
+ if (lowerBoundSliceHandle != 0) {
+ // Disown the new slice - it's owned by the C++ side of the JNI boundary
+ // from the perspective of this method.
+ return new Slice(lowerBoundSliceHandle, false);
+ }
+ return null;
+ }
+
+ /**
+ * Defines the extent up to which the forward iterator
+ * can returns entries. Once the bound is reached,
+ * {@link RocksIterator#isValid()} will be false.
+ *
+ * The upper bound is exclusive i.e. the bound value is not a valid entry.
+ *
+ * If iterator_extractor is not null, the Seek target and iterate_upper_bound
+ * need to have the same prefix. This is because ordering is not guaranteed
+ * outside of prefix domain.
+ *
+ * Default: null
*
* @param iterateUpperBound Slice representing the upper bound
* @return the reference to the current ReadOptions.
public ReadOptions setIterateUpperBound(final Slice iterateUpperBound) {
assert(isOwningHandle());
if (iterateUpperBound != null) {
- // Hold onto a reference so it doesn't get garbaged collected out from under us.
+ // Hold onto a reference so it doesn't get garbage collected out from under us.
iterateUpperBoundSlice_ = iterateUpperBound;
setIterateUpperBound(nativeHandle_, iterateUpperBoundSlice_.getNativeHandle());
}
}
/**
- * Defines the extent upto which the forward iterator can returns entries.
- * Once the bound is reached, Valid() will be false. iterate_upper_bound
- * is exclusive ie the bound value is not a valid entry. If
- * iterator_extractor is not null, the Seek target and iterator_upper_bound
- * need to have the same prefix. This is because ordering is not guaranteed
- * outside of prefix domain. There is no lower bound on the iterator.
+ * Returns the largest key at which the forward
+ * iterator can return an entry.
*
- * Default: nullptr
+ * The upper bound is exclusive i.e. the bound value is not a valid entry.
*
- * @return Slice representing current iterate_upper_bound setting, or null if
- * one does not exist.
+ * @return the largest key, or null if there is no upper bound defined.
*/
public Slice iterateUpperBound() {
assert(isOwningHandle());
- long upperBoundSliceHandle = iterateUpperBound(nativeHandle_);
+ final long upperBoundSliceHandle = iterateUpperBound(nativeHandle_);
if (upperBoundSliceHandle != 0) {
// Disown the new slice - it's owned by the C++ side of the JNI boundary
// from the perspective of this method.
}
/**
- * Defines the smallest key at which the backward iterator can return an
- * entry. Once the bound is passed, Valid() will be false.
- * `iterate_lower_bound` is inclusive ie the bound value is a valid entry.
+ * A callback to determine whether relevant keys for this scan exist in a
+ * given table based on the table's properties. The callback is passed the
+ * properties of each table during iteration. If the callback returns false,
+ * the table will not be scanned. This option only affects Iterators and has
+ * no impact on point lookups.
*
- * If prefix_extractor is not null, the Seek target and `iterate_lower_bound`
- * need to have the same prefix. This is because ordering is not guaranteed
- * outside of prefix domain.
+ * Default: null (every table will be scanned)
*
- * Default: nullptr
+ * @param tableFilter the table filter for the callback.
*
- * @param iterateLowerBound Slice representing the lower bound
* @return the reference to the current ReadOptions.
*/
- public ReadOptions setIterateLowerBound(final Slice iterateLowerBound) {
+ public ReadOptions setTableFilter(final AbstractTableFilter tableFilter) {
assert(isOwningHandle());
- if (iterateLowerBound != null) {
- // Hold onto a reference so it doesn't get garbaged collected out from under us.
- iterateLowerBoundSlice_ = iterateLowerBound;
- setIterateLowerBound(nativeHandle_, iterateLowerBoundSlice_.getNativeHandle());
- }
+ setTableFilter(nativeHandle_, tableFilter.nativeHandle_);
return this;
}
/**
- * Defines the smallest key at which the backward iterator can return an
- * entry. Once the bound is passed, Valid() will be false.
- * `iterate_lower_bound` is inclusive ie the bound value is a valid entry.
+ * Needed to support differential snapshots. Has 2 effects:
+ * 1) Iterator will skip all internal keys with seqnum < iter_start_seqnum
+ * 2) if this param > 0 iterator will return INTERNAL keys instead of user
+ * keys; e.g. return tombstones as well.
*
- * If prefix_extractor is not null, the Seek target and `iterate_lower_bound`
- * need to have the same prefix. This is because ordering is not guaranteed
- * outside of prefix domain.
+ * Default: 0 (don't filter by seqnum, return user keys)
*
- * Default: nullptr
+ * @param startSeqnum the starting sequence number.
*
- * @return Slice representing current iterate_lower_bound setting, or null if
- * one does not exist.
+ * @return the reference to the current ReadOptions.
*/
- public Slice iterateLowerBound() {
+ public ReadOptions setIterStartSeqnum(final long startSeqnum) {
assert(isOwningHandle());
- long lowerBoundSliceHandle = iterateLowerBound(nativeHandle_);
- if (lowerBoundSliceHandle != 0) {
- // Disown the new slice - it's owned by the C++ side of the JNI boundary
- // from the perspective of this method.
- return new Slice(lowerBoundSliceHandle, false);
- }
- return null;
+ setIterStartSeqnum(nativeHandle_, startSeqnum);
+ return this;
+ }
+
+ /**
+ * Returns the starting Sequence Number of any iterator.
+ * See {@link #setIterStartSeqnum(long)}.
+ *
+ * @return the starting sequence number of any iterator.
+ */
+ public long iterStartSeqnum() {
+ assert(isOwningHandle());
+ return iterStartSeqnum(nativeHandle_);
}
// instance variables
// NOTE: If you add new member variables, please update the copy constructor above!
//
- // Hold a reference to any iterate upper/lower bound that was set on this object
- // until we're destroyed or it's overwritten. That way the caller can freely
- // leave scope without us losing the Java Slice object, which during close()
- // would also reap its associated rocksdb::Slice native object since it's
- // possibly (likely) to be an owning handle.
- protected Slice iterateUpperBoundSlice_;
- protected Slice iterateLowerBoundSlice_;
+ // Hold a reference to any iterate lower or upper bound that was set on this
+ // object until we're destroyed or it's overwritten. That way the caller can
+ // freely leave scope without us losing the Java Slice object, which during
+ // close() would also reap its associated rocksdb::Slice native object since
+ // it's possibly (likely) to be an owning handle.
+ private Slice iterateLowerBoundSlice_;
+ private Slice iterateUpperBoundSlice_;
private native static long newReadOptions();
+ private native static long newReadOptions(final boolean verifyChecksums,
+ final boolean fillCache);
private native static long copyReadOptions(long handle);
+ @Override protected final native void disposeInternal(final long handle);
+
private native boolean verifyChecksums(long handle);
private native void setVerifyChecksums(long handle, boolean verifyChecksums);
private native boolean fillCache(long handle);
private native long readaheadSize(final long handle);
private native void setReadaheadSize(final long handle,
final long readaheadSize);
+ private native long maxSkippableInternalKeys(final long handle);
+ private native void setMaxSkippableInternalKeys(final long handle,
+ final long maxSkippableInternalKeys);
private native boolean ignoreRangeDeletions(final long handle);
private native void setIgnoreRangeDeletions(final long handle,
final boolean ignoreRangeDeletions);
final long upperBoundSliceHandle);
private native long iterateUpperBound(final long handle);
private native void setIterateLowerBound(final long handle,
- final long upperBoundSliceHandle);
+ final long lowerBoundSliceHandle);
private native long iterateLowerBound(final long handle);
-
- @Override protected final native void disposeInternal(final long handle);
-
+ private native void setTableFilter(final long handle,
+ final long tableFilterHandle);
+ private native void setIterStartSeqnum(final long handle, final long seqNum);
+ private native long iterStartSeqnum(final long handle);
}
import java.util.*;
import java.io.IOException;
-import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.rocksdb.util.Environment;
}
}
+ /**
+ * Private constructor.
+ *
+ * @param nativeHandle The native handle of the C++ RocksDB object
+ */
+ protected RocksDB(final long nativeHandle) {
+ super(nativeHandle);
+ }
+
/**
* The factory constructor of RocksDB that opens a RocksDB instance given
* the path to the database using the default options w/ createIfMissing
* @see Options#setCreateIfMissing(boolean)
*/
public static RocksDB open(final String path) throws RocksDBException {
- // This allows to use the rocksjni default Options instead of
- // the c++ one.
- Options options = new Options();
+ final Options options = new Options();
options.setCreateIfMissing(true);
return open(options, path);
}
final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
final List<ColumnFamilyHandle> columnFamilyHandles)
throws RocksDBException {
- // This allows to use the rocksjni default Options instead of
- // the c++ one.
- DBOptions options = new DBOptions();
+ final DBOptions options = new DBOptions();
return open(options, path, columnFamilyDescriptors, columnFamilyHandles);
}
return db;
}
+
+ /**
+ * This is similar to {@link #close()} except that it
+ * throws an exception if any error occurs.
+ *
+ * This will not fsync the WAL files.
+ * If syncing is required, the caller must first call {@link #syncWal()}
+ * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
+ * with {@link WriteOptions#setSync(boolean)} set to true.
+ *
+ * See also {@link #close()}.
+ *
+ * @throws RocksDBException if an error occurs whilst closing.
+ */
+ public void closeE() throws RocksDBException {
+ if (owningHandle_.compareAndSet(true, false)) {
+ try {
+ closeDatabase(nativeHandle_);
+ } finally {
+ disposeInternal();
+ }
+ }
+ }
+
+ /**
+ * This is similar to {@link #closeE()} except that it
+ * silently ignores any errors.
+ *
+ * This will not fsync the WAL files.
+ * If syncing is required, the caller must first call {@link #syncWal()}
+ * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
+ * with {@link WriteOptions#setSync(boolean)} set to true.
+ *
+ * See also {@link #close()}.
+ */
+ @Override
+ public void close() {
+ if (owningHandle_.compareAndSet(true, false)) {
+ try {
+ closeDatabase(nativeHandle_);
+ } catch (final RocksDBException e) {
+ // silently ignore the error report
+ } finally {
+ disposeInternal();
+ }
+ }
+ }
+
/**
* Static method to determine all available column families for a
* rocksdb database identified by path
path));
}
- protected void storeOptionsInstance(DBOptionsInterface options) {
- options_ = options;
+ /**
+ * Creates a new column family with the name columnFamilyName and
+ * allocates a ColumnFamilyHandle within an internal structure.
+ * The ColumnFamilyHandle is automatically disposed with DB disposal.
+ *
+ * @param columnFamilyDescriptor column family to be created.
+ * @return {@link org.rocksdb.ColumnFamilyHandle} instance.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public ColumnFamilyHandle createColumnFamily(
+ final ColumnFamilyDescriptor columnFamilyDescriptor)
+ throws RocksDBException {
+ return new ColumnFamilyHandle(this, createColumnFamily(nativeHandle_,
+ columnFamilyDescriptor.getName(),
+ columnFamilyDescriptor.getName().length,
+ columnFamilyDescriptor.getOptions().nativeHandle_));
}
- private static void checkBounds(int offset, int len, int size) {
- if ((offset | len | (offset + len) | (size - (offset + len))) < 0) {
- throw new IndexOutOfBoundsException(String.format("offset(%d), len(%d), size(%d)", offset, len, size));
+ /**
+ * Bulk create column families with the same column family options.
+ *
+ * @param columnFamilyOptions the options for the column families.
+ * @param columnFamilyNames the names of the column families.
+ *
+ * @return the handles to the newly created column families.
+ */
+ public List<ColumnFamilyHandle> createColumnFamilies(
+ final ColumnFamilyOptions columnFamilyOptions,
+ final List<byte[]> columnFamilyNames) throws RocksDBException {
+ final byte[][] cfNames = columnFamilyNames.toArray(
+ new byte[0][]);
+ final long[] cfHandles = createColumnFamilies(nativeHandle_,
+ columnFamilyOptions.nativeHandle_, cfNames);
+ final List<ColumnFamilyHandle> columnFamilyHandles =
+ new ArrayList<>(cfHandles.length);
+ for (int i = 0; i < cfHandles.length; i++) {
+ columnFamilyHandles.add(new ColumnFamilyHandle(this, cfHandles[i]));
+ }
+ return columnFamilyHandles;
+ }
+
+ /**
+ * Bulk create column families with the same column family options.
+ *
+ * @param columnFamilyDescriptors the descriptions of the column families.
+ *
+ * @return the handles to the newly created column families.
+ */
+ public List<ColumnFamilyHandle> createColumnFamilies(
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors)
+ throws RocksDBException {
+ final long[] cfOptsHandles = new long[columnFamilyDescriptors.size()];
+ final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
+ for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
+ final ColumnFamilyDescriptor columnFamilyDescriptor
+ = columnFamilyDescriptors.get(i);
+ cfOptsHandles[i] = columnFamilyDescriptor.getOptions().nativeHandle_;
+ cfNames[i] = columnFamilyDescriptor.getName();
+ }
+ final long[] cfHandles = createColumnFamilies(nativeHandle_,
+ cfOptsHandles, cfNames);
+ final List<ColumnFamilyHandle> columnFamilyHandles =
+ new ArrayList<>(cfHandles.length);
+ for (int i = 0; i < cfHandles.length; i++) {
+ columnFamilyHandles.add(new ColumnFamilyHandle(this, cfHandles[i]));
+ }
+ return columnFamilyHandles;
+ }
+
+ /**
+ * Drops the column family specified by {@code columnFamilyHandle}. This call
+ * only records a drop record in the manifest and prevents the column
+ * family from flushing and compacting.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void dropColumnFamily(final ColumnFamilyHandle columnFamilyHandle)
+ throws RocksDBException {
+ dropColumnFamily(nativeHandle_, columnFamilyHandle.nativeHandle_);
+ }
+
+ // Bulk drop column families. This call only records drop records in the
+ // manifest and prevents the column families from flushing and compacting.
+ // In case of error, the request may succeed partially. User may call
+ // ListColumnFamilies to check the result.
+ public void dropColumnFamilies(
+ final List<ColumnFamilyHandle> columnFamilies) throws RocksDBException {
+ final long[] cfHandles = new long[columnFamilies.size()];
+ for (int i = 0; i < columnFamilies.size(); i++) {
+ cfHandles[i] = columnFamilies.get(i).nativeHandle_;
}
+ dropColumnFamilies(nativeHandle_, cfHandles);
}
+ //TODO(AR) what about DestroyColumnFamilyHandle
+
/**
* Set the database entry for "key" to "value".
*
}
/**
- * Set the database entry for "key" to "value"
+ * Set the database entry for "key" to "value".
*
* @param key The specified key to be inserted
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
- * must be non-negative and no larger than ("key".length - offset)
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
* @param value the value associated with the specified key
- * @param vOffset the offset of the "value" array to be used, must be non-negative and
- * no longer than "key".length
- * @param vLen the length of the "value" array to be used, must be non-negative and
- * must be non-negative and no larger than ("value".length - offset)
- *
- * @throws RocksDBException thrown if errors happens in underlying native library.
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no larger than "value".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - vOffset)
+ *
+ * @throws RocksDBException thrown if an error happens in underlying native
+ * library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds
*/
- public void put(final byte[] key, int offset, int len, final byte[] value, int vOffset, int vLen) throws RocksDBException {
+ public void put(final byte[] key, final int offset, final int len,
+ final byte[] value, final int vOffset, final int vLen)
+ throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
put(nativeHandle_, key, offset, len, value, vOffset, vLen);
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param key The specified key to be inserted
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
- * must be non-negative and no larger than ("key".length - offset)
+ * @param offset the offset of the "key" array to be used, must
+ * be non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
* @param value the value associated with the specified key
- * @param vOffset the offset of the "value" array to be used, must be non-negative and
- * no longer than "key".length
- * @param vLen the length of the "value" array to be used, must be non-negative and
- * must be non-negative and no larger than ("value".length - offset)
- *
- * @throws RocksDBException thrown if errors happens in underlying native library.
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no larger than "value".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - vOffset)
+ *
+ * @throws RocksDBException thrown if an error happens in underlying native
+ * library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds
*/
- public void put(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, int offset, int len, final byte[] value, int vOffset, int vLen) throws RocksDBException {
+ public void put(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final int offset, final int len,
+ final byte[] value, final int vOffset, final int vLen)
+ throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
put(nativeHandle_, key, offset, len, value, vOffset, vLen,
*
* @param writeOpts {@link org.rocksdb.WriteOptions} instance.
* @param key The specified key to be inserted
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
- * must be non-negative and no larger than ("key".length - offset)
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
* @param value the value associated with the specified key
- * @param vOffset the offset of the "value" array to be used, must be non-negative and
- * no longer than "key".length
- * @param vLen the length of the "value" array to be used, must be non-negative and
- * must be non-negative and no larger than ("value".length - offset)
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no larger than "value".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - vOffset)
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds
*/
- public void put(final WriteOptions writeOpts, byte[] key, int offset, int len, byte[] value, int vOffset, int vLen) throws RocksDBException {
+ public void put(final WriteOptions writeOpts,
+ final byte[] key, final int offset, final int len,
+ final byte[] value, final int vOffset, final int vLen)
+ throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
put(nativeHandle_, writeOpts.nativeHandle_,
key, offset, len, value, vOffset, vLen);
}
-
/**
* Set the database entry for "key" to "value" for the specified
* column family.
* instance
* @param writeOpts {@link org.rocksdb.WriteOptions} instance.
* @param key The specified key to be inserted
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
- * must be non-negative and no larger than ("key".length - offset)
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
* @param value the value associated with the specified key
- * @param vOffset the offset of the "value" array to be used, must be non-negative and
- * no longer than "key".length
- * @param vLen the length of the "value" array to be used, must be non-negative and
- * must be non-negative and no larger than ("value".length - offset)
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no larger than "value".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - vOffset)
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds
*/
public void put(final ColumnFamilyHandle columnFamilyHandle,
- final WriteOptions writeOpts, final byte[] key, int offset, int len,
- final byte[] value, int vOffset, int vLen) throws RocksDBException {
+ final WriteOptions writeOpts,
+ final byte[] key, final int offset, final int len,
+ final byte[] value, final int vOffset, final int vLen)
+ throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
put(nativeHandle_, writeOpts.nativeHandle_, key, offset, len, value,
}
/**
- * If the key definitely does not exist in the database, then this method
- * returns false, else true.
+ * Remove the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
*
- * This check is potentially lighter-weight than invoking DB::Get(). One way
- * to make this lighter weight is to avoid doing any IOs.
+ * @param key Key to delete within database
*
- * @param key byte array of a key to search for
- * @param value StringBuilder instance which is a out parameter if a value is
- * found in block-cache.
- * @return boolean value indicating if key does not exist or might exist.
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ *
+ * @deprecated Use {@link #delete(byte[])}
*/
- public boolean keyMayExist(final byte[] key, final StringBuilder value) {
- return keyMayExist(nativeHandle_, key, 0, key.length, value);
+ @Deprecated
+ public void remove(final byte[] key) throws RocksDBException {
+ delete(key);
}
/**
- * If the key definitely does not exist in the database, then this method
- * returns false, else true.
- *
- * This check is potentially lighter-weight than invoking DB::Get(). One way
- * to make this lighter weight is to avoid doing any IOs.
+ * Delete the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
*
- * @param key byte array of a key to search for
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
- * @param value StringBuilder instance which is a out parameter if a value is
- * found in block-cache.
+ * @param key Key to delete within database
*
- * @return boolean value indicating if key does not exist or might exist.
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
*/
- public boolean keyMayExist(final byte[] key, int offset, int len, final StringBuilder value) {
- checkBounds(offset, len, key.length);
- return keyMayExist(nativeHandle_, key, offset, len, value);
+ public void delete(final byte[] key) throws RocksDBException {
+ delete(nativeHandle_, key, 0, key.length);
}
/**
- * If the key definitely does not exist in the database, then this method
- * returns false, else true.
+ * Delete the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
*
- * This check is potentially lighter-weight than invoking DB::Get(). One way
- * to make this lighter weight is to avoid doing any IOs.
+ * @param key Key to delete within database
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be
+ * non-negative and no larger than ("key".length - offset)
*
- * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
- * @param key byte array of a key to search for
- * @param value StringBuilder instance which is a out parameter if a value is
- * found in block-cache.
- * @return boolean value indicating if key does not exist or might exist.
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
*/
- public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle,
- final byte[] key, final StringBuilder value) {
- return keyMayExist(nativeHandle_, key, 0, key.length,
- columnFamilyHandle.nativeHandle_, value);
+ public void delete(final byte[] key, final int offset, final int len)
+ throws RocksDBException {
+ delete(nativeHandle_, key, offset, len);
}
/**
- * If the key definitely does not exist in the database, then this method
- * returns false, else true.
+ * Remove the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
*
- * This check is potentially lighter-weight than invoking DB::Get(). One way
- * to make this lighter weight is to avoid doing any IOs.
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key Key to delete within database
*
- * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
- * @param key byte array of a key to search for
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
- * @param value StringBuilder instance which is a out parameter if a value is
- * found in block-cache.
- * @return boolean value indicating if key does not exist or might exist.
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ *
+ * @deprecated Use {@link #delete(ColumnFamilyHandle, byte[])}
*/
- public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle,
- final byte[] key, int offset, int len, final StringBuilder value) {
- checkBounds(offset, len, key.length);
- return keyMayExist(nativeHandle_, key, offset, len,
- columnFamilyHandle.nativeHandle_, value);
+ @Deprecated
+ public void remove(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key) throws RocksDBException {
+ delete(columnFamilyHandle, key);
}
-
/**
- * If the key definitely does not exist in the database, then this method
- * returns false, else true.
+ * Delete the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
*
- * This check is potentially lighter-weight than invoking DB::Get(). One way
- * to make this lighter weight is to avoid doing any IOs.
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key Key to delete within database
*
- * @param readOptions {@link ReadOptions} instance
- * @param key byte array of a key to search for
- * @param value StringBuilder instance which is a out parameter if a value is
- * found in block-cache.
- * @return boolean value indicating if key does not exist or might exist.
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
*/
- public boolean keyMayExist(final ReadOptions readOptions,
- final byte[] key, final StringBuilder value) {
- return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
- key, 0, key.length, value);
+ public void delete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key) throws RocksDBException {
+ delete(nativeHandle_, key, 0, key.length, columnFamilyHandle.nativeHandle_);
}
/**
- * If the key definitely does not exist in the database, then this method
- * returns false, else true.
+ * Delete the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
*
- * This check is potentially lighter-weight than invoking DB::Get(). One way
- * to make this lighter weight is to avoid doing any IOs.
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param key Key to delete within database
+ * @param offset the offset of the "key" array to be used,
+ * must be non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
*
- * @param readOptions {@link ReadOptions} instance
- * @param key byte array of a key to search for
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
- * @param value StringBuilder instance which is a out parameter if a value is
- * found in block-cache.
- * @return boolean value indicating if key does not exist or might exist.
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
*/
- public boolean keyMayExist(final ReadOptions readOptions,
- final byte[] key, int offset, int len, final StringBuilder value) {
- checkBounds(offset, len, key.length);
- return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
- key, offset, len, value);
+ public void delete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final int offset, final int len)
+ throws RocksDBException {
+ delete(nativeHandle_, key, offset, len, columnFamilyHandle.nativeHandle_);
}
/**
- * If the key definitely does not exist in the database, then this method
- * returns false, else true.
- *
- * This check is potentially lighter-weight than invoking DB::Get(). One way
- * to make this lighter weight is to avoid doing any IOs.
+ * Remove the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
*
- * @param readOptions {@link ReadOptions} instance
- * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
- * @param key byte array of a key to search for
- * @param value StringBuilder instance which is a out parameter if a value is
- * found in block-cache.
- * @return boolean value indicating if key does not exist or might exist.
- */
- public boolean keyMayExist(final ReadOptions readOptions,
- final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
- final StringBuilder value) {
- return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
- key, 0, key.length, columnFamilyHandle.nativeHandle_,
- value);
- }
-
- /**
- * If the key definitely does not exist in the database, then this method
- * returns false, else true.
+ * @param writeOpt WriteOptions to be used with delete operation
+ * @param key Key to delete within database
*
- * This check is potentially lighter-weight than invoking DB::Get(). One way
- * to make this lighter weight is to avoid doing any IOs.
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
*
- * @param readOptions {@link ReadOptions} instance
- * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
- * @param key byte array of a key to search for
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
- * @param value StringBuilder instance which is a out parameter if a value is
- * found in block-cache.
- * @return boolean value indicating if key does not exist or might exist.
+ * @deprecated Use {@link #delete(WriteOptions, byte[])}
*/
- public boolean keyMayExist(final ReadOptions readOptions,
- final ColumnFamilyHandle columnFamilyHandle, final byte[] key, int offset, int len,
- final StringBuilder value) {
- checkBounds(offset, len, key.length);
- return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
- key, offset, len, columnFamilyHandle.nativeHandle_,
- value);
+ @Deprecated
+ public void remove(final WriteOptions writeOpt, final byte[] key)
+ throws RocksDBException {
+ delete(writeOpt, key);
}
/**
- * Apply the specified updates to the database.
+ * Delete the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
*
- * @param writeOpts WriteOptions instance
- * @param updates WriteBatch instance
+ * @param writeOpt WriteOptions to be used with delete operation
+ * @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
- public void write(final WriteOptions writeOpts, final WriteBatch updates)
+ public void delete(final WriteOptions writeOpt, final byte[] key)
throws RocksDBException {
- write0(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_);
+ delete(nativeHandle_, writeOpt.nativeHandle_, key, 0, key.length);
}
/**
- * Apply the specified updates to the database.
+ * Delete the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
*
- * @param writeOpts WriteOptions instance
- * @param updates WriteBatchWithIndex instance
+ * @param writeOpt WriteOptions to be used with delete operation
+ * @param key Key to delete within database
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be
+ * non-negative and no larger than ("key".length - offset)
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
- public void write(final WriteOptions writeOpts,
- final WriteBatchWithIndex updates) throws RocksDBException {
- write1(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_);
+ public void delete(final WriteOptions writeOpt, final byte[] key,
+ final int offset, final int len) throws RocksDBException {
+ delete(nativeHandle_, writeOpt.nativeHandle_, key, offset, len);
}
/**
- * Add merge operand for key/value pair.
+ * Remove the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
*
- * @param key the specified key to be merged.
- * @param value the value to be merged with the current value for
- * the specified key.
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param writeOpt WriteOptions to be used with delete operation
+ * @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
+ *
+ * @deprecated Use {@link #delete(ColumnFamilyHandle, WriteOptions, byte[])}
*/
- public void merge(final byte[] key, final byte[] value)
- throws RocksDBException {
- merge(nativeHandle_, key, 0, key.length, value, 0, value.length);
+ @Deprecated
+ public void remove(final ColumnFamilyHandle columnFamilyHandle,
+ final WriteOptions writeOpt, final byte[] key) throws RocksDBException {
+ delete(columnFamilyHandle, writeOpt, key);
}
/**
- * Add merge operand for key/value pair.
+ * Delete the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
*
- * @param key the specified key to be merged.
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
- * @param value the value to be merged with the current value for the specified key.
- * @param vOffset the offset of the "value" array to be used, must be non-negative and
- * no longer than "key".length
- * @param vLen the length of the "value" array to be used, must be non-negative and
- * must be non-negative and no larger than ("value".length - offset)
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param writeOpt WriteOptions to be used with delete operation
+ * @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
- public void merge(final byte[] key, int offset, int len, final byte[] value, int vOffset, int vLen)
+ public void delete(final ColumnFamilyHandle columnFamilyHandle,
+ final WriteOptions writeOpt, final byte[] key)
throws RocksDBException {
- checkBounds(offset, len, key.length);
- checkBounds(vOffset, vLen, value.length);
- merge(nativeHandle_, key, offset, len, value, vOffset, vLen);
+ delete(nativeHandle_, writeOpt.nativeHandle_, key, 0, key.length,
+ columnFamilyHandle.nativeHandle_);
}
-
/**
- * Add merge operand for key/value pair in a ColumnFamily.
+ * Delete the database entry (if any) for "key". Returns OK on
+ * success, and a non-OK status on error. It is not an error if "key"
+ * did not exist in the database.
*
- * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
- * @param key the specified key to be merged.
- * @param value the value to be merged with the current value for
- * the specified key.
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance
+ * @param writeOpt WriteOptions to be used with delete operation
+ * @param key Key to delete within database
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be
+ * non-negative and no larger than ("key".length - offset)
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
- public void merge(final ColumnFamilyHandle columnFamilyHandle,
- final byte[] key, final byte[] value) throws RocksDBException {
- merge(nativeHandle_, key, 0, key.length, value, 0, value.length,
+ public void delete(final ColumnFamilyHandle columnFamilyHandle,
+ final WriteOptions writeOpt, final byte[] key, final int offset,
+ final int len) throws RocksDBException {
+ delete(nativeHandle_, writeOpt.nativeHandle_, key, offset, len,
columnFamilyHandle.nativeHandle_);
}
/**
- * Add merge operand for key/value pair in a ColumnFamily.
+ * Remove the database entry for {@code key}. Requires that the key exists
+ * and was not overwritten. It is not an error if the key did not exist
+ * in the database.
*
- * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
- * @param key the specified key to be merged.
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
- * @param value the value to be merged with the current value for
- * the specified key.
- * @param vOffset the offset of the "value" array to be used, must be non-negative and
- * no longer than "key".length
- * @param vLen the length of the "value" array to be used, must be non-negative and
- * must be non-negative and no larger than ("value".length - offset)
+ * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
+ * times), then the result of calling SingleDelete() on this key is undefined.
+ * SingleDelete() only behaves correctly if there has been only one Put()
+ * for this key since the previous call to SingleDelete() for this key.
+ *
+ * This feature is currently an experimental performance optimization
+ * for a very specific workload. It is up to the caller to ensure that
+ * SingleDelete is only used for a key that is not deleted using Delete() or
+ * written using Merge(). Mixing SingleDelete operations with Deletes and
+ * Merges can result in undefined behavior.
+ *
+ * @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * native library.
*/
- public void merge(final ColumnFamilyHandle columnFamilyHandle,
- final byte[] key, int offset, int len, final byte[] value, int vOffset, int vLen) throws RocksDBException {
- checkBounds(offset, len, key.length);
- checkBounds(vOffset, vLen, value.length);
- merge(nativeHandle_, key, offset, len, value, vOffset, vLen,
- columnFamilyHandle.nativeHandle_);
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final byte[] key) throws RocksDBException {
+ singleDelete(nativeHandle_, key, key.length);
}
/**
- * Add merge operand for key/value pair.
+ * Remove the database entry for {@code key}. Requires that the key exists
+ * and was not overwritten. It is not an error if the key did not exist
+ * in the database.
*
- * @param writeOpts {@link WriteOptions} for this write.
- * @param key the specified key to be merged.
- * @param value the value to be merged with the current value for
- * the specified key.
+ * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
+ * times), then the result of calling SingleDelete() on this key is undefined.
+ * SingleDelete() only behaves correctly if there has been only one Put()
+ * for this key since the previous call to SingleDelete() for this key.
+ *
+ * This feature is currently an experimental performance optimization
+ * for a very specific workload. It is up to the caller to ensure that
+ * SingleDelete is only used for a key that is not deleted using Delete() or
+ * written using Merge(). Mixing SingleDelete operations with Deletes and
+ * Merges can result in undefined behavior.
+ *
+ * @param columnFamilyHandle The column family to delete the key from
+ * @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * native library.
*/
- public void merge(final WriteOptions writeOpts, final byte[] key,
- final byte[] value) throws RocksDBException {
- merge(nativeHandle_, writeOpts.nativeHandle_,
- key, 0, key.length, value, 0, value.length);
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key) throws RocksDBException {
+ singleDelete(nativeHandle_, key, key.length,
+ columnFamilyHandle.nativeHandle_);
}
/**
- * Add merge operand for key/value pair.
+ * Remove the database entry for {@code key}. Requires that the key exists
+ * and was not overwritten. It is not an error if the key did not exist
+ * in the database.
*
- * @param writeOpts {@link WriteOptions} for this write.
- * @param key the specified key to be merged.
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
- * @param value the value to be merged with the current value for
- * the specified key.
- * @param vOffset the offset of the "value" array to be used, must be non-negative and
- * no longer than "key".length
- * @param vLen the length of the "value" array to be used, must be non-negative and
- * must be non-negative and no larger than ("value".length - offset)
+ * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
+ * times), then the result of calling SingleDelete() on this key is undefined.
+ * SingleDelete() only behaves correctly if there has been only one Put()
+ * for this key since the previous call to SingleDelete() for this key.
+ *
+ * This feature is currently an experimental performance optimization
+ * for a very specific workload. It is up to the caller to ensure that
+ * SingleDelete is only used for a key that is not deleted using Delete() or
+ * written using Merge(). Mixing SingleDelete operations with Deletes and
+ * Merges can result in undefined behavior.
+ *
+ * Note: consider setting {@link WriteOptions#setSync(boolean)} true.
+ *
+ * @param writeOpt Write options for the delete
+ * @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * native library.
*/
- public void merge(final WriteOptions writeOpts, final byte[] key, int offset, int len,
- final byte[] value, int vOffset, int vLen) throws RocksDBException {
- checkBounds(offset, len, key.length);
- checkBounds(vOffset, vLen, value.length);
- merge(nativeHandle_, writeOpts.nativeHandle_,
- key, offset, len, value, vOffset, vLen);
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final WriteOptions writeOpt, final byte[] key)
+ throws RocksDBException {
+ singleDelete(nativeHandle_, writeOpt.nativeHandle_, key, key.length);
}
/**
- * Add merge operand for key/value pair.
+ * Remove the database entry for {@code key}. Requires that the key exists
+ * and was not overwritten. It is not an error if the key did not exist
+ * in the database.
*
- * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
- * @param writeOpts {@link WriteOptions} for this write.
- * @param key the specified key to be merged.
- * @param value the value to be merged with the current value for
- * the specified key.
+ * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
+ * times), then the result of calling SingleDelete() on this key is undefined.
+ * SingleDelete() only behaves correctly if there has been only one Put()
+ * for this key since the previous call to SingleDelete() for this key.
+ *
+ * This feature is currently an experimental performance optimization
+ * for a very specific workload. It is up to the caller to ensure that
+ * SingleDelete is only used for a key that is not deleted using Delete() or
+ * written using Merge(). Mixing SingleDelete operations with Deletes and
+ * Merges can result in undefined behavior.
+ *
+ * Note: consider setting {@link WriteOptions#setSync(boolean)} true.
+ *
+ * @param columnFamilyHandle The column family to delete the key from
+ * @param writeOpt Write options for the delete
+ * @param key Key to delete within database
*
* @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * native library.
*/
- public void merge(final ColumnFamilyHandle columnFamilyHandle,
- final WriteOptions writeOpts, final byte[] key,
- final byte[] value) throws RocksDBException {
- merge(nativeHandle_, writeOpts.nativeHandle_,
- key, 0, key.length, value, 0, value.length,
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
+ final WriteOptions writeOpt, final byte[] key) throws RocksDBException {
+ singleDelete(nativeHandle_, writeOpt.nativeHandle_, key, key.length,
columnFamilyHandle.nativeHandle_);
}
+
/**
- * Add merge operand for key/value pair.
+ * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
+ * including "beginKey" and excluding "endKey". a non-OK status on error. It
+ * is not an error if no keys exist in the range ["beginKey", "endKey").
+ *
+ * Delete the database entry (if any) for "key". Returns OK on success, and a
+ * non-OK status on error. It is not an error if "key" did not exist in the
+ * database.
+ *
+ * @param beginKey First key to delete within database (inclusive)
+ * @param endKey Last key to delete within database (exclusive)
+ *
+ * @throws RocksDBException thrown if error happens in underlying native
+ * library.
+ */
+ public void deleteRange(final byte[] beginKey, final byte[] endKey)
+ throws RocksDBException {
+ deleteRange(nativeHandle_, beginKey, 0, beginKey.length, endKey, 0,
+ endKey.length);
+ }
+
+ /**
+ * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
+ * including "beginKey" and excluding "endKey". a non-OK status on error. It
+ * is not an error if no keys exist in the range ["beginKey", "endKey").
+ *
+ * Delete the database entry (if any) for "key". Returns OK on success, and a
+ * non-OK status on error. It is not an error if "key" did not exist in the
+ * database.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance
+ * @param beginKey First key to delete within database (inclusive)
+ * @param endKey Last key to delete within database (exclusive)
+ *
+ * @throws RocksDBException thrown if error happens in underlying native
+ * library.
+ */
+ public void deleteRange(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] beginKey, final byte[] endKey) throws RocksDBException {
+ deleteRange(nativeHandle_, beginKey, 0, beginKey.length, endKey, 0,
+ endKey.length, columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
+ * including "beginKey" and excluding "endKey". a non-OK status on error. It
+ * is not an error if no keys exist in the range ["beginKey", "endKey").
+ *
+ * Delete the database entry (if any) for "key". Returns OK on success, and a
+ * non-OK status on error. It is not an error if "key" did not exist in the
+ * database.
+ *
+ * @param writeOpt WriteOptions to be used with delete operation
+ * @param beginKey First key to delete within database (inclusive)
+ * @param endKey Last key to delete within database (exclusive)
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void deleteRange(final WriteOptions writeOpt, final byte[] beginKey,
+ final byte[] endKey) throws RocksDBException {
+ deleteRange(nativeHandle_, writeOpt.nativeHandle_, beginKey, 0,
+ beginKey.length, endKey, 0, endKey.length);
+ }
+
+ /**
+ * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
+ * including "beginKey" and excluding "endKey". a non-OK status on error. It
+ * is not an error if no keys exist in the range ["beginKey", "endKey").
+ *
+ * Delete the database entry (if any) for "key". Returns OK on success, and a
+ * non-OK status on error. It is not an error if "key" did not exist in the
+ * database.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance
+ * @param writeOpt WriteOptions to be used with delete operation
+ * @param beginKey First key to delete within database (inclusive)
+ * @param endKey Last key to delete within database (exclusive)
+ *
+ * @throws RocksDBException thrown if error happens in underlying native
+ * library.
+ */
+ public void deleteRange(final ColumnFamilyHandle columnFamilyHandle,
+ final WriteOptions writeOpt, final byte[] beginKey, final byte[] endKey)
+ throws RocksDBException {
+ deleteRange(nativeHandle_, writeOpt.nativeHandle_, beginKey, 0,
+ beginKey.length, endKey, 0, endKey.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+
+ /**
+ * Add merge operand for key/value pair.
+ *
+ * @param key the specified key to be merged.
+ * @param value the value to be merged with the current value for the
+ * specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void merge(final byte[] key, final byte[] value)
+ throws RocksDBException {
+ merge(nativeHandle_, key, 0, key.length, value, 0, value.length);
+ }
+
+ /**
+ * Add merge operand for key/value pair.
+ *
+ * @param key the specified key to be merged.
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param value the value to be merged with the current value for the
+ * specified key.
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no longer than "key".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and must be non-negative and no larger than
+ * ("value".length - offset)
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds
+ */
+ public void merge(final byte[] key, int offset, int len, final byte[] value,
+ final int vOffset, final int vLen) throws RocksDBException {
+ checkBounds(offset, len, key.length);
+ checkBounds(vOffset, vLen, value.length);
+ merge(nativeHandle_, key, offset, len, value, vOffset, vLen);
+ }
+
+ /**
+ * Add merge operand for key/value pair in a ColumnFamily.
+ *
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param key the specified key to be merged.
+ * @param value the value to be merged with the current value for
+ * the specified key.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void merge(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final byte[] value) throws RocksDBException {
+ merge(nativeHandle_, key, 0, key.length, value, 0, value.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Add merge operand for key/value pair in a ColumnFamily.
+ *
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param key the specified key to be merged.
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param value the value to be merged with the current value for
+ * the specified key.
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no longer than "key".length
+ * @param vLen the length of the "value" array to be used, must be
+ * must be non-negative and no larger than ("value".length - offset)
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds
+ */
+ public void merge(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final int offset, final int len, final byte[] value,
+ final int vOffset, final int vLen) throws RocksDBException {
+ checkBounds(offset, len, key.length);
+ checkBounds(vOffset, vLen, value.length);
+ merge(nativeHandle_, key, offset, len, value, vOffset, vLen,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Add merge operand for key/value pair.
*
- * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
* @param writeOpts {@link WriteOptions} for this write.
* @param key the specified key to be merged.
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
* @param value the value to be merged with the current value for
* the specified key.
- * @param vOffset the offset of the "value" array to be used, must be non-negative and
- * no longer than "key".length
- * @param vLen the length of the "value" array to be used, must be non-negative and
- * must be non-negative and no larger than ("value".length - offset)
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void merge(final WriteOptions writeOpts, final byte[] key,
+ final byte[] value) throws RocksDBException {
+ merge(nativeHandle_, writeOpts.nativeHandle_,
+ key, 0, key.length, value, 0, value.length);
+ }
+
+ /**
+ * Add merge operand for key/value pair.
+ *
+ * @param writeOpts {@link WriteOptions} for this write.
+ * @param key the specified key to be merged.
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("value".length - offset)
+ * @param value the value to be merged with the current value for
+ * the specified key.
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no longer than "key".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - offset)
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds
+ */
+ public void merge(final WriteOptions writeOpts,
+ final byte[] key, final int offset, final int len,
+ final byte[] value, final int vOffset, final int vLen)
+ throws RocksDBException {
+ checkBounds(offset, len, key.length);
+ checkBounds(vOffset, vLen, value.length);
+ merge(nativeHandle_, writeOpts.nativeHandle_,
+ key, offset, len, value, vOffset, vLen);
+ }
+
+ /**
+ * Add merge operand for key/value pair.
+ *
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param writeOpts {@link WriteOptions} for this write.
+ * @param key the specified key to be merged.
+ * @param value the value to be merged with the current value for the
+ * specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public void merge(final ColumnFamilyHandle columnFamilyHandle,
- final WriteOptions writeOpts, final byte[] key, int offset, int len,
- final byte[] value, int vOffset, int vLen) throws RocksDBException {
+ final WriteOptions writeOpts, final byte[] key, final byte[] value)
+ throws RocksDBException {
+ merge(nativeHandle_, writeOpts.nativeHandle_,
+ key, 0, key.length, value, 0, value.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Add merge operand for key/value pair.
+ *
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param writeOpts {@link WriteOptions} for this write.
+ * @param key the specified key to be merged.
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param value the value to be merged with the current value for
+ * the specified key.
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no longer than "key".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - offset)
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds
+ */
+ public void merge(
+ final ColumnFamilyHandle columnFamilyHandle, final WriteOptions writeOpts,
+ final byte[] key, final int offset, final int len,
+ final byte[] value, final int vOffset, final int vLen)
+ throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
    merge(nativeHandle_, writeOpts.nativeHandle_,
        key, offset, len, value, vOffset, vLen,
        columnFamilyHandle.nativeHandle_);
}
+ /**
+ * Apply the specified updates to the database.
+ *
+ * @param writeOpts WriteOptions instance
+ * @param updates WriteBatch instance
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void write(final WriteOptions writeOpts, final WriteBatch updates)
+ throws RocksDBException {
+ write0(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_);
+ }
+
+ /**
+ * Apply the specified updates to the database.
+ *
+ * @param writeOpts WriteOptions instance
+ * @param updates WriteBatchWithIndex instance
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void write(final WriteOptions writeOpts,
+ final WriteBatchWithIndex updates) throws RocksDBException {
+ write1(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_);
+ }
+
// TODO(AR) we should improve the #get() API, returning -1 (RocksDB.NOT_FOUND) is not very nice
// when we could communicate better status into, also the C++ code show that -2 could be returned
* Get the value associated with the specified key within column family*
*
* @param key the key to retrieve the value.
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
* @param value the out-value to receive the retrieved value.
- * @param vOffset the offset of the "value" array to be used, must be non-negative and
- * no longer than "key".length
- * @param vLen the length of the "value" array to be used, must be non-negative and
- * must be non-negative and no larger than ("value".length - offset)
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no larger than "value".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - offset)
*
* @return The size of the actual value that matches the specified
* {@code key} in byte. If the return value is greater than the
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
- public int get(final byte[] key, int offset, int len, final byte[] value, int vOffset, int vLen) throws RocksDBException {
+ public int get(final byte[] key, final int offset, final int len,
+ final byte[] value, final int vOffset, final int vLen)
+ throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
return get(nativeHandle_, key, offset, len, value, vOffset, vLen);
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param key the key to retrieve the value.
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
* @param value the out-value to receive the retrieved value.
- * @param vOffset the offset of the "value" array to be used, must be non-negative and
- * no longer than "key".length
- * @param vLen the length of the "value" array to be used, must be non-negative and
- * must be non-negative and no larger than ("value".length - offset)
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no larger than "value".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - offset)
*
* @return The size of the actual value that matches the specified
* {@code key} in byte. If the return value is greater than the
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
- public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, int offset, int len,
- final byte[] value, int vOffset, int vLen) throws RocksDBException, IllegalArgumentException {
+ public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
+ final int offset, final int len, final byte[] value, final int vOffset,
+ final int vLen) throws RocksDBException, IllegalArgumentException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
return get(nativeHandle_, key, offset, len, value, vOffset, vLen,
*
* @param opt {@link org.rocksdb.ReadOptions} instance.
* @param key the key to retrieve the value.
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
* @param value the out-value to receive the retrieved value.
- * @param vOffset the offset of the "value" array to be used, must be non-negative and
- * no longer than "key".length
- * @param vLen the length of the "value" array to be used, must be non-negative and
- * must be non-negative and no larger than ("value".length - offset)
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no larger than "value".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - offset)
* @return The size of the actual value that matches the specified
* {@code key} in byte. If the return value is greater than the
* length of {@code value}, then it indicates that the size of the
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
- public int get(final ReadOptions opt, final byte[] key, int offset, int len,
- final byte[] value, int vOffset, int vLen) throws RocksDBException {
+ public int get(final ReadOptions opt, final byte[] key, final int offset,
+ final int len, final byte[] value, final int vOffset, final int vLen)
+ throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
return get(nativeHandle_, opt.nativeHandle_,
* instance
* @param opt {@link org.rocksdb.ReadOptions} instance.
* @param key the key to retrieve the value.
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be
+ * non-negative and no larger than ("key".length - offset)
* @param value the out-value to receive the retrieved value.
- * @param vOffset the offset of the "value" array to be used, must be non-negative and
- * no longer than "key".length
- * @param vLen the length of the "value" array to be used, must be non-negative and
- * must be non-negative and no larger than ("value".length - offset)
+ * @param vOffset the offset of the "value" array to be used, must be
+ * non-negative and no larger than "value".length
+ * @param vLen the length of the "value" array to be used, must be
+ * non-negative and no larger than ("value".length - offset)
* @return The size of the actual value that matches the specified
* {@code key} in byte. If the return value is greater than the
* length of {@code value}, then it indicates that the size of the
* native library.
*/
public int get(final ColumnFamilyHandle columnFamilyHandle,
- final ReadOptions opt, final byte[] key, int offset, int len, final byte[] value, int vOffset, int vLen)
+ final ReadOptions opt, final byte[] key, final int offset, final int len,
+ final byte[] value, final int vOffset, final int vLen)
throws RocksDBException {
checkBounds(offset, len, key.length);
checkBounds(vOffset, vLen, value.length);
*
* @param key the key retrieve the value.
* @return a byte array storing the value associated with the input key if
- * any. null if it does not find the specified key.
+ * any. null if it does not find the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
* returned if the specified key is not found.
*
* @param key the key retrieve the value.
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
* @return a byte array storing the value associated with the input key if
- * any. null if it does not find the specified key.
+ * any. null if it does not find the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
- public byte[] get(final byte[] key, int offset, int len) throws RocksDBException {
+ public byte[] get(final byte[] key, final int offset,
+ final int len) throws RocksDBException {
checkBounds(offset, len, key.length);
return get(nativeHandle_, key, offset, len);
}
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param key the key retrieve the value.
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
* @return a byte array storing the value associated with the input key if
- * any. null if it does not find the specified key.
+ * any. null if it does not find the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public byte[] get(final ColumnFamilyHandle columnFamilyHandle,
- final byte[] key, int offset, int len) throws RocksDBException {
+ final byte[] key, final int offset, final int len)
+ throws RocksDBException {
checkBounds(offset, len, key.length);
return get(nativeHandle_, key, offset, len,
columnFamilyHandle.nativeHandle_);
* returned if the specified key is not found.
*
* @param key the key retrieve the value.
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
* @param opt Read options.
* @return a byte array storing the value associated with the input key if
- * any. null if it does not find the specified key.
+ * any. null if it does not find the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
- public byte[] get(final ReadOptions opt, final byte[] key, int offset, int len)
- throws RocksDBException {
+ public byte[] get(final ReadOptions opt, final byte[] key, final int offset,
+ final int len) throws RocksDBException {
checkBounds(offset, len, key.length);
return get(nativeHandle_, opt.nativeHandle_, key, offset, len);
}
* @param key the key retrieve the value.
* @param opt Read options.
* @return a byte array storing the value associated with the input key if
- * any. null if it does not find the specified key.
+ * any. null if it does not find the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
* @param key the key retrieve the value.
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
* @param opt Read options.
* @return a byte array storing the value associated with the input key if
- * any. null if it does not find the specified key.
+ * any. null if it does not find the specified key.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
*/
public byte[] get(final ColumnFamilyHandle columnFamilyHandle,
- final ReadOptions opt, final byte[] key, int offset, int len) throws RocksDBException {
+ final ReadOptions opt, final byte[] key, final int offset, final int len)
+ throws RocksDBException {
checkBounds(offset, len, key.length);
return get(nativeHandle_, opt.nativeHandle_, key, offset, len,
columnFamilyHandle.nativeHandle_);
throws RocksDBException {
assert(keys.size() != 0);
- final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
+ final byte[][] keysArray = keys.toArray(new byte[0][]);
final int keyOffsets[] = new int[keysArray.length];
final int keyLengths[] = new int[keysArray.length];
for(int i = 0; i < keyLengths.length; i++) {
return keyValueMap;
}
- private static int computeCapacityHint(final int estimatedNumberOfItems) {
- // Default load factor for HashMap is 0.75, so N * 1.5 will be at the load
- // limit. We add +1 for a buffer.
- return (int)Math.ceil(estimatedNumberOfItems * 1.5 + 1.0);
- }
-
/**
* Returns a map of keys for which values were found in DB.
* <p>
* {@link org.rocksdb.ColumnFamilyHandle} instances.
* @param keys List of keys for which values need to be retrieved.
* @return Map where key of map is the key passed by user and value for map
- * entry is the corresponding value in DB.
+ * entry is the corresponding value in DB.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
}
- final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
+ final byte[][] keysArray = keys.toArray(new byte[0][]);
final int keyOffsets[] = new int[keysArray.length];
final int keyLengths[] = new int[keysArray.length];
for(int i = 0; i < keyLengths.length; i++) {
* @param opt Read options.
* @param keys of keys for which values need to be retrieved.
* @return Map where key of map is the key passed by user and value for map
- * entry is the corresponding value in DB.
+ * entry is the corresponding value in DB.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
final List<byte[]> keys) throws RocksDBException {
assert(keys.size() != 0);
- final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
+ final byte[][] keysArray = keys.toArray(new byte[0][]);
final int keyOffsets[] = new int[keysArray.length];
final int keyLengths[] = new int[keysArray.length];
for(int i = 0; i < keyLengths.length; i++) {
* {@link org.rocksdb.ColumnFamilyHandle} instances.
* @param keys of keys for which values need to be retrieved.
* @return Map where key of map is the key passed by user and value for map
- * entry is the corresponding value in DB.
+ * entry is the corresponding value in DB.
*
* @throws RocksDBException thrown if error happens in underlying
* native library.
cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
}
- final byte[][] keysArray = keys.toArray(new byte[keys.size()][]);
+ final byte[][] keysArray = keys.toArray(new byte[0][]);
final int keyOffsets[] = new int[keysArray.length];
final int keyLengths[] = new int[keysArray.length];
for(int i = 0; i < keyLengths.length; i++) {
}
/**
- * Remove the database entry (if any) for "key". Returns OK on
- * success, and a non-OK status on error. It is not an error if "key"
- * did not exist in the database.
- *
- * @param key Key to delete within database
+ * If the key definitely does not exist in the database, then this method
+ * returns false, else true.
*
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * This check is potentially lighter-weight than invoking DB::Get(). One way
+ * to make this lighter weight is to avoid doing any IOs.
*
- * @deprecated Use {@link #delete(byte[])}
+ * @param key byte array of a key to search for
+ * @param value StringBuilder instance which is an out parameter if a value is
+ * found in block-cache.
+ * @return boolean value indicating if key does not exist or might exist.
*/
- @Deprecated
- public void remove(final byte[] key) throws RocksDBException {
- delete(key);
+ public boolean keyMayExist(final byte[] key, final StringBuilder value) {
+ return keyMayExist(nativeHandle_, key, 0, key.length, value);
}
/**
- * Delete the database entry (if any) for "key". Returns OK on
- * success, and a non-OK status on error. It is not an error if "key"
- * did not exist in the database.
+ * If the key definitely does not exist in the database, then this method
+ * returns false, else true.
*
- * @param key Key to delete within database
+ * This check is potentially lighter-weight than invoking DB::Get(). One way
+ * to make this lighter weight is to avoid doing any IOs.
*
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * @param key byte array of a key to search for
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param value StringBuilder instance which is an out parameter if a value is
+ * found in block-cache.
+ *
+ * @return boolean value indicating if key does not exist or might exist.
*/
- public void delete(final byte[] key) throws RocksDBException {
- delete(nativeHandle_, key, 0, key.length);
+ public boolean keyMayExist(final byte[] key, final int offset, final int len,
+ final StringBuilder value) {
+ checkBounds(offset, len, key.length);
+ return keyMayExist(nativeHandle_, key, offset, len, value);
}
/**
- * Delete the database entry (if any) for "key". Returns OK on
- * success, and a non-OK status on error. It is not an error if "key"
- * did not exist in the database.
+ * If the key definitely does not exist in the database, then this method
+ * returns false, else true.
*
- * @param key Key to delete within database
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
+ * This check is potentially lighter-weight than invoking DB::Get(). One way
+ * to make this lighter weight is to avoid doing any IOs.
*
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param key byte array of a key to search for
+ * @param value StringBuilder instance which is an out parameter if a value is
+ * found in block-cache.
+ * @return boolean value indicating if key does not exist or might exist.
*/
- public void delete(final byte[] key, int offset, int len) throws RocksDBException {
- delete(nativeHandle_, key, offset, len);
+ public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final StringBuilder value) {
+ return keyMayExist(nativeHandle_, key, 0, key.length,
+ columnFamilyHandle.nativeHandle_, value);
}
/**
- * Remove the database entry (if any) for "key". Returns OK on
- * success, and a non-OK status on error. It is not an error if "key"
- * did not exist in the database.
- *
- * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
- * instance
- * @param key Key to delete within database
+ * If the key definitely does not exist in the database, then this method
+ * returns false, else true.
*
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * This check is potentially lighter-weight than invoking DB::Get(). One way
+ * to make this lighter weight is to avoid doing any IOs.
*
- * @deprecated Use {@link #delete(ColumnFamilyHandle, byte[])}
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param key byte array of a key to search for
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param value StringBuilder instance which is an out parameter if a value is
+ * found in block-cache.
+ * @return boolean value indicating if key does not exist or might exist.
*/
- @Deprecated
- public void remove(final ColumnFamilyHandle columnFamilyHandle,
- final byte[] key) throws RocksDBException {
- delete(columnFamilyHandle, key);
+ public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, int offset, int len, final StringBuilder value) {
+ checkBounds(offset, len, key.length);
+ return keyMayExist(nativeHandle_, key, offset, len,
+ columnFamilyHandle.nativeHandle_, value);
}
/**
- * Delete the database entry (if any) for "key". Returns OK on
- * success, and a non-OK status on error. It is not an error if "key"
- * did not exist in the database.
+ * If the key definitely does not exist in the database, then this method
+ * returns false, else true.
*
- * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
- * instance
- * @param key Key to delete within database
+ * This check is potentially lighter-weight than invoking DB::Get(). One way
+ * to make this lighter weight is to avoid doing any IOs.
*
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * @param readOptions {@link ReadOptions} instance
+ * @param key byte array of a key to search for
+ * @param value StringBuilder instance which is an out parameter if a value is
+ * found in block-cache.
+ * @return boolean value indicating if key does not exist or might exist.
*/
- public void delete(final ColumnFamilyHandle columnFamilyHandle,
- final byte[] key) throws RocksDBException {
- delete(nativeHandle_, key, 0, key.length, columnFamilyHandle.nativeHandle_);
+ public boolean keyMayExist(final ReadOptions readOptions,
+ final byte[] key, final StringBuilder value) {
+ return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
+ key, 0, key.length, value);
}
/**
- * Delete the database entry (if any) for "key". Returns OK on
- * success, and a non-OK status on error. It is not an error if "key"
- * did not exist in the database.
+ * If the key definitely does not exist in the database, then this method
+ * returns false, else true.
*
- * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
- * instance
- * @param key Key to delete within database
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
+ * This check is potentially lighter-weight than invoking DB::Get(). One way
+ * to make this lighter weight is to avoid doing any IOs.
*
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * @param readOptions {@link ReadOptions} instance
+ * @param key byte array of a key to search for
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param value StringBuilder instance which is an out parameter if a value is
+ * found in block-cache.
+ * @return boolean value indicating if key does not exist or might exist.
*/
- public void delete(final ColumnFamilyHandle columnFamilyHandle,
- final byte[] key, int offset, int len) throws RocksDBException {
- delete(nativeHandle_, key, offset, len, columnFamilyHandle.nativeHandle_);
+ public boolean keyMayExist(final ReadOptions readOptions,
+ final byte[] key, final int offset, final int len,
+ final StringBuilder value) {
+ checkBounds(offset, len, key.length);
+ return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
+ key, offset, len, value);
}
/**
- * Remove the database entry (if any) for "key". Returns OK on
- * success, and a non-OK status on error. It is not an error if "key"
- * did not exist in the database.
- *
- * @param writeOpt WriteOptions to be used with delete operation
- * @param key Key to delete within database
+ * If the key definitely does not exist in the database, then this method
+ * returns false, else true.
*
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * This check is potentially lighter-weight than invoking DB::Get(). One way
+ * to make this lighter weight is to avoid doing any IOs.
*
- * @deprecated Use {@link #delete(WriteOptions, byte[])}
+ * @param readOptions {@link ReadOptions} instance
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param key byte array of a key to search for
+ * @param value StringBuilder instance which is an out parameter if a value is
+ * found in block-cache.
+ * @return boolean value indicating if key does not exist or might exist.
*/
- @Deprecated
- public void remove(final WriteOptions writeOpt, final byte[] key)
- throws RocksDBException {
- delete(writeOpt, key);
+ public boolean keyMayExist(final ReadOptions readOptions,
+ final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
+ final StringBuilder value) {
+ return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
+ key, 0, key.length, columnFamilyHandle.nativeHandle_,
+ value);
}
/**
- * Delete the database entry (if any) for "key". Returns OK on
- * success, and a non-OK status on error. It is not an error if "key"
- * did not exist in the database.
+ * If the key definitely does not exist in the database, then this method
+ * returns false, else true.
*
- * @param writeOpt WriteOptions to be used with delete operation
- * @param key Key to delete within database
+ * This check is potentially lighter-weight than invoking DB::Get(). One way
+ * to make this lighter weight is to avoid doing any IOs.
*
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * @param readOptions {@link ReadOptions} instance
+ * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+ * @param key byte array of a key to search for
+ * @param offset the offset of the "key" array to be used, must be
+ * non-negative and no larger than "key".length
+ * @param len the length of the "key" array to be used, must be non-negative
+ * and no larger than ("key".length - offset)
+ * @param value StringBuilder instance which is an out parameter if a value is
+ * found in block-cache.
+ * @return boolean value indicating if key does not exist or might exist.
*/
- public void delete(final WriteOptions writeOpt, final byte[] key)
- throws RocksDBException {
- delete(nativeHandle_, writeOpt.nativeHandle_, key, 0, key.length);
+ public boolean keyMayExist(final ReadOptions readOptions,
+ final ColumnFamilyHandle columnFamilyHandle, final byte[] key,
+ final int offset, final int len, final StringBuilder value) {
+ checkBounds(offset, len, key.length);
+ return keyMayExist(nativeHandle_, readOptions.nativeHandle_,
+ key, offset, len, columnFamilyHandle.nativeHandle_,
+ value);
}
/**
- * Delete the database entry (if any) for "key". Returns OK on
- * success, and a non-OK status on error. It is not an error if "key"
- * did not exist in the database.
+ * <p>Return a heap-allocated iterator over the contents of the
+ * database. The result of newIterator() is initially invalid
+ * (caller must call one of the Seek methods on the iterator
+ * before using it).</p>
*
- * @param writeOpt WriteOptions to be used with delete operation
- * @param key Key to delete within database
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
+ * <p>Caller should close the iterator when it is no longer needed.
+ * The returned iterator should be closed before this db is closed.
+ * </p>
*
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * @return instance of iterator object.
*/
- public void delete(final WriteOptions writeOpt, final byte[] key, int offset, int len)
- throws RocksDBException {
- delete(nativeHandle_, writeOpt.nativeHandle_, key, offset, len);
+ public RocksIterator newIterator() {
+ return new RocksIterator(this, iterator(nativeHandle_));
}
/**
- * Remove the database entry (if any) for "key". Returns OK on
- * success, and a non-OK status on error. It is not an error if "key"
- * did not exist in the database.
- *
- * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
- * instance
- * @param writeOpt WriteOptions to be used with delete operation
- * @param key Key to delete within database
+ * <p>Return a heap-allocated iterator over the contents of the
+ * database. The result of newIterator() is initially invalid
+ * (caller must call one of the Seek methods on the iterator
+ * before using it).</p>
*
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * <p>Caller should close the iterator when it is no longer needed.
+ * The returned iterator should be closed before this db is closed.
+ * </p>
*
- * @deprecated Use {@link #delete(ColumnFamilyHandle, WriteOptions, byte[])}
+ * @param readOptions {@link ReadOptions} instance.
+ * @return instance of iterator object.
*/
- @Deprecated
- public void remove(final ColumnFamilyHandle columnFamilyHandle,
- final WriteOptions writeOpt, final byte[] key)
- throws RocksDBException {
- delete(columnFamilyHandle, writeOpt, key);
+ public RocksIterator newIterator(final ReadOptions readOptions) {
+ return new RocksIterator(this, iterator(nativeHandle_,
+ readOptions.nativeHandle_));
}
/**
- * Delete the database entry (if any) for "key". Returns OK on
- * success, and a non-OK status on error. It is not an error if "key"
- * did not exist in the database.
+ * <p>Return a heap-allocated iterator over the contents of the
+ * database. The result of newIterator() is initially invalid
+ * (caller must call one of the Seek methods on the iterator
+ * before using it).</p>
+ *
+ * <p>Caller should close the iterator when it is no longer needed.
+ * The returned iterator should be closed before this db is closed.
+ * </p>
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
- * @param writeOpt WriteOptions to be used with delete operation
- * @param key Key to delete within database
- *
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * @return instance of iterator object.
*/
- public void delete(final ColumnFamilyHandle columnFamilyHandle,
- final WriteOptions writeOpt, final byte[] key)
- throws RocksDBException {
- delete(nativeHandle_, writeOpt.nativeHandle_, key, 0, key.length,
- columnFamilyHandle.nativeHandle_);
+ public RocksIterator newIterator(
+ final ColumnFamilyHandle columnFamilyHandle) {
+ return new RocksIterator(this, iteratorCF(nativeHandle_,
+ columnFamilyHandle.nativeHandle_));
}
/**
- * Delete the database entry (if any) for "key". Returns OK on
- * success, and a non-OK status on error. It is not an error if "key"
- * did not exist in the database.
+ * <p>Return a heap-allocated iterator over the contents of the
+ * database. The result of newIterator() is initially invalid
+ * (caller must call one of the Seek methods on the iterator
+ * before using it).</p>
+ *
+ * <p>Caller should close the iterator when it is no longer needed.
+ * The returned iterator should be closed before this db is closed.
+ * </p>
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance
- * @param writeOpt WriteOptions to be used with delete operation
- * @param key Key to delete within database
- * @param offset the offset of the "key" array to be used, must be non-negative and
- * no larger than "key".length
- * @param len the length of the "key" array to be used, must be non-negative and
- *
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * @param readOptions {@link ReadOptions} instance.
+ * @return instance of iterator object.
*/
- public void delete(final ColumnFamilyHandle columnFamilyHandle,
- final WriteOptions writeOpt, final byte[] key, int offset, int len)
- throws RocksDBException {
- delete(nativeHandle_, writeOpt.nativeHandle_, key, offset, len,
- columnFamilyHandle.nativeHandle_);
+ public RocksIterator newIterator(final ColumnFamilyHandle columnFamilyHandle,
+ final ReadOptions readOptions) {
+ return new RocksIterator(this, iteratorCF(nativeHandle_,
+ columnFamilyHandle.nativeHandle_, readOptions.nativeHandle_));
}
/**
- * Remove the database entry for {@code key}. Requires that the key exists
- * and was not overwritten. It is not an error if the key did not exist
- * in the database.
- *
- * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
- * times), then the result of calling SingleDelete() on this key is undefined.
- * SingleDelete() only behaves correctly if there has been only one Put()
- * for this key since the previous call to SingleDelete() for this key.
- *
- * This feature is currently an experimental performance optimization
- * for a very specific workload. It is up to the caller to ensure that
- * SingleDelete is only used for a key that is not deleted using Delete() or
- * written using Merge(). Mixing SingleDelete operations with Deletes and
- * Merges can result in undefined behavior.
+ * Returns iterators from a consistent database state across multiple
+ * column families. Iterators are heap allocated and need to be deleted
+ * before the db is deleted.
*
- * @param key Key to delete within database
+ * @param columnFamilyHandleList {@link java.util.List} containing
+ * {@link org.rocksdb.ColumnFamilyHandle} instances.
+ * @return {@link java.util.List} containing {@link org.rocksdb.RocksIterator}
+ * instances
*
* @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * native library.
*/
- @Experimental("Performance optimization for a very specific workload")
- public void singleDelete(final byte[] key) throws RocksDBException {
- singleDelete(nativeHandle_, key, key.length);
+ public List<RocksIterator> newIterators(
+ final List<ColumnFamilyHandle> columnFamilyHandleList)
+ throws RocksDBException {
+ return newIterators(columnFamilyHandleList, new ReadOptions());
}
/**
- * Remove the database entry for {@code key}. Requires that the key exists
- * and was not overwritten. It is not an error if the key did not exist
- * in the database.
- *
- * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
- * times), then the result of calling SingleDelete() on this key is undefined.
- * SingleDelete() only behaves correctly if there has been only one Put()
- * for this key since the previous call to SingleDelete() for this key.
- *
- * This feature is currently an experimental performance optimization
- * for a very specific workload. It is up to the caller to ensure that
- * SingleDelete is only used for a key that is not deleted using Delete() or
- * written using Merge(). Mixing SingleDelete operations with Deletes and
- * Merges can result in undefined behavior.
+ * Returns iterators from a consistent database state across multiple
+ * column families. Iterators are heap allocated and need to be deleted
+ * before the db is deleted.
*
- * @param columnFamilyHandle The column family to delete the key from
- * @param key Key to delete within database
+ * @param columnFamilyHandleList {@link java.util.List} containing
+ * {@link org.rocksdb.ColumnFamilyHandle} instances.
+ * @param readOptions {@link ReadOptions} instance.
+ * @return {@link java.util.List} containing {@link org.rocksdb.RocksIterator}
+ * instances
*
* @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * native library.
*/
- @Experimental("Performance optimization for a very specific workload")
- public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
- final byte[] key) throws RocksDBException {
- singleDelete(nativeHandle_, key, key.length,
- columnFamilyHandle.nativeHandle_);
+ public List<RocksIterator> newIterators(
+ final List<ColumnFamilyHandle> columnFamilyHandleList,
+ final ReadOptions readOptions) throws RocksDBException {
+
+ final long[] columnFamilyHandles = new long[columnFamilyHandleList.size()];
+ for (int i = 0; i < columnFamilyHandleList.size(); i++) {
+ columnFamilyHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
+ }
+
+ final long[] iteratorRefs = iterators(nativeHandle_, columnFamilyHandles,
+ readOptions.nativeHandle_);
+
+ final List<RocksIterator> iterators = new ArrayList<>(
+ columnFamilyHandleList.size());
+ for (int i=0; i<columnFamilyHandleList.size(); i++){
+ iterators.add(new RocksIterator(this, iteratorRefs[i]));
+ }
+ return iterators;
}
+
/**
- * Remove the database entry for {@code key}. Requires that the key exists
- * and was not overwritten. It is not an error if the key did not exist
- * in the database.
- *
- * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
- * times), then the result of calling SingleDelete() on this key is undefined.
- * SingleDelete() only behaves correctly if there has been only one Put()
- * for this key since the previous call to SingleDelete() for this key.
- *
- * This feature is currently an experimental performance optimization
- * for a very specific workload. It is up to the caller to ensure that
- * SingleDelete is only used for a key that is not deleted using Delete() or
- * written using Merge(). Mixing SingleDelete operations with Deletes and
- * Merges can result in undefined behavior.
- *
- * Note: consider setting {@link WriteOptions#setSync(boolean)} true.
+ * <p>Return a handle to the current DB state. Iterators created with
+ * this handle will all observe a stable snapshot of the current DB
+ * state. The caller must call {@link #releaseSnapshot(Snapshot)} when
+ * the snapshot is no longer needed.</p>
*
- * @param writeOpt Write options for the delete
- * @param key Key to delete within database
+ * <p>null will be returned if the DB fails to take a snapshot or does
+ * not support snapshot.</p>
*
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * @return {@link Snapshot} instance
*/
- @Experimental("Performance optimization for a very specific workload")
- public void singleDelete(final WriteOptions writeOpt, final byte[] key)
- throws RocksDBException {
- singleDelete(nativeHandle_, writeOpt.nativeHandle_, key, key.length);
+ public Snapshot getSnapshot() {
+ long snapshotHandle = getSnapshot(nativeHandle_);
+ if (snapshotHandle != 0) {
+ return new Snapshot(snapshotHandle);
+ }
+ return null;
}
/**
- * Remove the database entry for {@code key}. Requires that the key exists
- * and was not overwritten. It is not an error if the key did not exist
- * in the database.
- *
- * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
- * times), then the result of calling SingleDelete() on this key is undefined.
- * SingleDelete() only behaves correctly if there has been only one Put()
- * for this key since the previous call to SingleDelete() for this key.
- *
- * This feature is currently an experimental performance optimization
- * for a very specific workload. It is up to the caller to ensure that
- * SingleDelete is only used for a key that is not deleted using Delete() or
- * written using Merge(). Mixing SingleDelete operations with Deletes and
- * Merges can result in undefined behavior.
- *
- * Note: consider setting {@link WriteOptions#setSync(boolean)} true.
+ * Release a previously acquired snapshot.
*
- * @param columnFamilyHandle The column family to delete the key from
- * @param writeOpt Write options for the delete
- * @param key Key to delete within database
+ * The caller must not use "snapshot" after this call.
*
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * @param snapshot {@link Snapshot} instance
*/
- @Experimental("Performance optimization for a very specific workload")
- public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
- final WriteOptions writeOpt, final byte[] key) throws RocksDBException {
- singleDelete(nativeHandle_, writeOpt.nativeHandle_, key, key.length,
- columnFamilyHandle.nativeHandle_);
+ public void releaseSnapshot(final Snapshot snapshot) {
+ if (snapshot != null) {
+ releaseSnapshot(nativeHandle_, snapshot.nativeHandle_);
+ }
}
/**
* <p>Valid property names include:
* <ul>
* <li>"rocksdb.num-files-at-level<N>" - return the number of files at
- * level <N>, where <N> is an ASCII representation of a level
- * number (e.g. "0").</li>
- * <li>"rocksdb.stats" - returns a multi-line string that describes statistics
- * about the internal operation of the DB.</li>
- * <li>"rocksdb.sstables" - returns a multi-line string that describes all
- * of the sstables that make up the db contents.</li>
- * </ul>
- *
- * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
- * instance
- * @param property to be fetched. See above for examples
- * @return property value
- *
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
- */
- public String getProperty(final ColumnFamilyHandle columnFamilyHandle,
- final String property) throws RocksDBException {
- return getProperty0(nativeHandle_, columnFamilyHandle.nativeHandle_,
- property, property.length());
- }
-
- /**
- * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
- * including "beginKey" and excluding "endKey". a non-OK status on error. It
- * is not an error if no keys exist in the range ["beginKey", "endKey").
- *
- * Delete the database entry (if any) for "key". Returns OK on success, and a
- * non-OK status on error. It is not an error if "key" did not exist in the
- * database.
- *
- * @param beginKey
- * First key to delete within database (included)
- * @param endKey
- * Last key to delete within database (excluded)
- *
- * @throws RocksDBException
- * thrown if error happens in underlying native library.
- */
- public void deleteRange(final byte[] beginKey, final byte[] endKey) throws RocksDBException {
- deleteRange(nativeHandle_, beginKey, 0, beginKey.length, endKey, 0, endKey.length);
- }
-
- /**
- * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
- * including "beginKey" and excluding "endKey". a non-OK status on error. It
- * is not an error if no keys exist in the range ["beginKey", "endKey").
- *
- * Delete the database entry (if any) for "key". Returns OK on success, and a
- * non-OK status on error. It is not an error if "key" did not exist in the
- * database.
- *
- * @param columnFamilyHandle
- * {@link org.rocksdb.ColumnFamilyHandle} instance
- * @param beginKey
- * First key to delete within database (included)
- * @param endKey
- * Last key to delete within database (excluded)
- *
- * @throws RocksDBException
- * thrown if error happens in underlying native library.
- */
- public void deleteRange(final ColumnFamilyHandle columnFamilyHandle, final byte[] beginKey,
- final byte[] endKey) throws RocksDBException {
- deleteRange(nativeHandle_, beginKey, 0, beginKey.length, endKey, 0, endKey.length,
- columnFamilyHandle.nativeHandle_);
- }
-
- /**
- * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
- * including "beginKey" and excluding "endKey". a non-OK status on error. It
- * is not an error if no keys exist in the range ["beginKey", "endKey").
- *
- * Delete the database entry (if any) for "key". Returns OK on success, and a
- * non-OK status on error. It is not an error if "key" did not exist in the
- * database.
- *
- * @param writeOpt
- * WriteOptions to be used with delete operation
- * @param beginKey
- * First key to delete within database (included)
- * @param endKey
- * Last key to delete within database (excluded)
- *
- * @throws RocksDBException
- * thrown if error happens in underlying native library.
- */
- public void deleteRange(final WriteOptions writeOpt, final byte[] beginKey, final byte[] endKey)
- throws RocksDBException {
- deleteRange(nativeHandle_, writeOpt.nativeHandle_, beginKey, 0, beginKey.length, endKey, 0,
- endKey.length);
- }
-
- /**
- * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
- * including "beginKey" and excluding "endKey". a non-OK status on error. It
- * is not an error if no keys exist in the range ["beginKey", "endKey").
- *
- * Delete the database entry (if any) for "key". Returns OK on success, and a
- * non-OK status on error. It is not an error if "key" did not exist in the
- * database.
+ * level <N>, where <N> is an ASCII representation of a level
+ * number (e.g. "0").</li>
+ * <li>"rocksdb.stats" - returns a multi-line string that describes statistics
+ * about the internal operation of the DB.</li>
+ * <li>"rocksdb.sstables" - returns a multi-line string that describes all
+ * of the sstables that make up the db contents.</li>
+ * </ul>
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
- * instance
- * @param writeOpt
- * WriteOptions to be used with delete operation
- * @param beginKey
- * First key to delete within database (included)
- * @param endKey
- * Last key to delete within database (excluded)
+ * instance, or null for the default column family.
+ * @param property to be fetched. See above for examples
+ * @return property value
*
- * @throws RocksDBException
- * thrown if error happens in underlying native library.
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
*/
- public void deleteRange(final ColumnFamilyHandle columnFamilyHandle, final WriteOptions writeOpt,
- final byte[] beginKey, final byte[] endKey) throws RocksDBException {
- deleteRange(nativeHandle_, writeOpt.nativeHandle_, beginKey, 0, beginKey.length, endKey, 0,
- endKey.length, columnFamilyHandle.nativeHandle_);
+ public String getProperty(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final String property) throws RocksDBException {
+ return getProperty(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ property, property.length());
}
/**
* native library.
*/
public String getProperty(final String property) throws RocksDBException {
- return getProperty0(nativeHandle_, property, property.length());
+ return getProperty(null, property);
+ }
+
+
+ /**
+ * Gets a property map.
+ *
+ * @param property to be fetched.
+ *
+ * @return the property map
+ *
+ * @throws RocksDBException if an error happens in the underlying native code.
+ */
+ public Map<String, String> getMapProperty(final String property)
+ throws RocksDBException {
+ return getMapProperty(null, property);
+ }
+
+ /**
+ * Gets a property map.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance, or null for the default column family.
+ * @param property to be fetched.
+ *
+ * @return the property map
+ *
+ * @throws RocksDBException if an error happens in the underlying native code.
+ */
+ public Map<String, String> getMapProperty(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final String property) throws RocksDBException {
+ return getMapProperty(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ property, property.length());
}
/**
* @throws RocksDBException if an error happens in the underlying native code.
*/
public long getLongProperty(final String property) throws RocksDBException {
- return getLongProperty(nativeHandle_, property, property.length());
+ return getLongProperty(null, property);
}
/**
* unsigned long using provided methods of type {@link Long}.</p>
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
- * instance
+ * instance, or null for the default column family
* @param property to be fetched.
*
* @return numerical property value
*
* @throws RocksDBException if an error happens in the underlying native code.
*/
- public long getLongProperty(final ColumnFamilyHandle columnFamilyHandle,
+ public long getLongProperty(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
final String property) throws RocksDBException {
- return getLongProperty(nativeHandle_, columnFamilyHandle.nativeHandle_,
+ return getLongProperty(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
property, property.length());
}
- /**
+ /**
+ * Reset internal stats for DB and all column families.
+ *
+ * Note this doesn't reset {@link Options#statistics()} as it is not
+ * owned by DB.
+ *
+ * @throws RocksDBException if an error occurs whilst resetting the stats
+ */
+ public void resetStats() throws RocksDBException {
+ resetStats(nativeHandle_);
+ }
+
+ /**
* <p> Return sum of the getLongProperty of all the column families</p>
*
* <p><strong>Note</strong>: As the returned property is of type
*
* @throws RocksDBException if an error happens in the underlying native code.
*/
- public long getAggregatedLongProperty(final String property) throws RocksDBException {
- return getAggregatedLongProperty(nativeHandle_, property, property.length());
- }
-
- /**
- * <p>Return a heap-allocated iterator over the contents of the
- * database. The result of newIterator() is initially invalid
- * (caller must call one of the Seek methods on the iterator
- * before using it).</p>
- *
- * <p>Caller should close the iterator when it is no longer needed.
- * The returned iterator should be closed before this db is closed.
- * </p>
- *
- * @return instance of iterator object.
- */
- public RocksIterator newIterator() {
- return new RocksIterator(this, iterator(nativeHandle_));
+ public long getAggregatedLongProperty(final String property)
+ throws RocksDBException {
+ return getAggregatedLongProperty(nativeHandle_, property,
+ property.length());
}
/**
- * <p>Return a heap-allocated iterator over the contents of the
- * database. The result of newIterator() is initially invalid
- * (caller must call one of the Seek methods on the iterator
- * before using it).</p>
- *
- * <p>Caller should close the iterator when it is no longer needed.
- * The returned iterator should be closed before this db is closed.
- * </p>
+ * Get the approximate file system space used by keys in each range.
*
- * @param readOptions {@link ReadOptions} instance.
- * @return instance of iterator object.
- */
- public RocksIterator newIterator(final ReadOptions readOptions) {
- return new RocksIterator(this, iterator(nativeHandle_,
- readOptions.nativeHandle_));
- }
-
- /**
- * <p>Return a handle to the current DB state. Iterators created with
- * this handle will all observe a stable snapshot of the current DB
- * state. The caller must call ReleaseSnapshot(result) when the
- * snapshot is no longer needed.</p>
+ * Note that the returned sizes measure file system space usage, so
+ * if the user data compresses by a factor of ten, the returned
+ * sizes will be one-tenth the size of the corresponding user data.
*
- * <p>nullptr will be returned if the DB fails to take a snapshot or does
- * not support snapshot.</p>
+ * {@code sizeApproximationFlags} defines whether the returned size
+ * should include the recently written data in the mem-tables (if
+ * the mem-table type supports it), data serialized to disk, or both.
*
- * @return Snapshot {@link Snapshot} instance
- */
- public Snapshot getSnapshot() {
- long snapshotHandle = getSnapshot(nativeHandle_);
- if (snapshotHandle != 0) {
- return new Snapshot(snapshotHandle);
- }
- return null;
- }
-
- /**
- * Release a previously acquired snapshot. The caller must not
- * use "snapshot" after this call.
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance, or null for the default column family
+ * @param ranges the ranges over which to approximate sizes
+ * @param sizeApproximationFlags flags to determine what to include in the
+ * approximation.
*
- * @param snapshot {@link Snapshot} instance
+ * @return the sizes
*/
- public void releaseSnapshot(final Snapshot snapshot) {
- if (snapshot != null) {
- releaseSnapshot(nativeHandle_, snapshot.nativeHandle_);
+ public long[] getApproximateSizes(
+ /*@Nullable*/ final ColumnFamilyHandle columnFamilyHandle,
+ final List<Range> ranges,
+ final SizeApproximationFlag... sizeApproximationFlags) {
+
+ byte flags = 0x0;
+ for (final SizeApproximationFlag sizeApproximationFlag
+ : sizeApproximationFlags) {
+ flags |= sizeApproximationFlag.getValue();
}
- }
- /**
- * <p>Return a heap-allocated iterator over the contents of the
- * database. The result of newIterator() is initially invalid
- * (caller must call one of the Seek methods on the iterator
- * before using it).</p>
- *
- * <p>Caller should close the iterator when it is no longer needed.
- * The returned iterator should be closed before this db is closed.
- * </p>
- *
- * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
- * instance
- * @return instance of iterator object.
- */
- public RocksIterator newIterator(
- final ColumnFamilyHandle columnFamilyHandle) {
- return new RocksIterator(this, iteratorCF(nativeHandle_,
- columnFamilyHandle.nativeHandle_));
+ return getApproximateSizes(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ toRangeSliceHandles(ranges), flags);
}
/**
- * <p>Return a heap-allocated iterator over the contents of the
- * database. The result of newIterator() is initially invalid
- * (caller must call one of the Seek methods on the iterator
- * before using it).</p>
+ * Get the approximate file system space used by keys in each range for
+ * the default column family.
*
- * <p>Caller should close the iterator when it is no longer needed.
- * The returned iterator should be closed before this db is closed.
- * </p>
+ * Note that the returned sizes measure file system space usage, so
+ * if the user data compresses by a factor of ten, the returned
+ * sizes will be one-tenth the size of the corresponding user data.
*
- * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
- * instance
- * @param readOptions {@link ReadOptions} instance.
- * @return instance of iterator object.
- */
- public RocksIterator newIterator(final ColumnFamilyHandle columnFamilyHandle,
- final ReadOptions readOptions) {
- return new RocksIterator(this, iteratorCF(nativeHandle_,
- columnFamilyHandle.nativeHandle_, readOptions.nativeHandle_));
- }
-
- /**
- * Returns iterators from a consistent database state across multiple
- * column families. Iterators are heap allocated and need to be deleted
- * before the db is deleted
+ * {@code sizeApproximationFlags} defines whether the returned size
+ * should include the recently written data in the mem-tables (if
+ * the mem-table type supports it), data serialized to disk, or both.
*
- * @param columnFamilyHandleList {@link java.util.List} containing
- * {@link org.rocksdb.ColumnFamilyHandle} instances.
- * @return {@link java.util.List} containing {@link org.rocksdb.RocksIterator}
- * instances
+ * @param ranges the ranges over which to approximate sizes
+ * @param sizeApproximationFlags flags to determine what to include in the
+ * approximation.
*
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
+ * @return the sizes.
*/
- public List<RocksIterator> newIterators(
- final List<ColumnFamilyHandle> columnFamilyHandleList)
- throws RocksDBException {
- return newIterators(columnFamilyHandleList, new ReadOptions());
+ public long[] getApproximateSizes(final List<Range> ranges,
+ final SizeApproximationFlag... sizeApproximationFlags) {
+ return getApproximateSizes(null, ranges, sizeApproximationFlags);
}
- /**
- * Returns iterators from a consistent database state across multiple
- * column families. Iterators are heap allocated and need to be deleted
- * before the db is deleted
- *
- * @param columnFamilyHandleList {@link java.util.List} containing
- * {@link org.rocksdb.ColumnFamilyHandle} instances.
- * @param readOptions {@link ReadOptions} instance.
- * @return {@link java.util.List} containing {@link org.rocksdb.RocksIterator}
- * instances
- *
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
- */
- public List<RocksIterator> newIterators(
- final List<ColumnFamilyHandle> columnFamilyHandleList,
- final ReadOptions readOptions) throws RocksDBException {
-
- final long[] columnFamilyHandles = new long[columnFamilyHandleList.size()];
- for (int i = 0; i < columnFamilyHandleList.size(); i++) {
- columnFamilyHandles[i] = columnFamilyHandleList.get(i).nativeHandle_;
- }
-
- final long[] iteratorRefs = iterators(nativeHandle_, columnFamilyHandles,
- readOptions.nativeHandle_);
+ public static class CountAndSize {
+ public final long count;
+ public final long size;
- final List<RocksIterator> iterators = new ArrayList<>(
- columnFamilyHandleList.size());
- for (int i=0; i<columnFamilyHandleList.size(); i++){
- iterators.add(new RocksIterator(this, iteratorRefs[i]));
+ public CountAndSize(final long count, final long size) {
+ this.count = count;
+ this.size = size;
}
- return iterators;
- }
-
- /**
- * Gets the handle for the default column family
- *
- * @return The handle of the default column family
- */
- public ColumnFamilyHandle getDefaultColumnFamily() {
- final ColumnFamilyHandle cfHandle = new ColumnFamilyHandle(this,
- getDefaultColumnFamily(nativeHandle_));
- cfHandle.disOwnNativeHandle();
- return cfHandle;
- }
-
- /**
- * Creates a new column family with the name columnFamilyName and
- * allocates a ColumnFamilyHandle within an internal structure.
- * The ColumnFamilyHandle is automatically disposed with DB disposal.
- *
- * @param columnFamilyDescriptor column family to be created.
- * @return {@link org.rocksdb.ColumnFamilyHandle} instance.
- *
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
- */
- public ColumnFamilyHandle createColumnFamily(
- final ColumnFamilyDescriptor columnFamilyDescriptor)
- throws RocksDBException {
- return new ColumnFamilyHandle(this, createColumnFamily(nativeHandle_,
- columnFamilyDescriptor.columnFamilyName(),
- columnFamilyDescriptor.columnFamilyOptions().nativeHandle_));
}
/**
- * Drops the column family specified by {@code columnFamilyHandle}. This call
- * only records a drop record in the manifest and prevents the column
- * family from flushing and compacting.
+ * This method is similar to
+ * {@link #getApproximateSizes(ColumnFamilyHandle, List, SizeApproximationFlag...)},
+ * except that it returns the approximate number of records and size in memtables.
*
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
- * instance
- *
- * @throws RocksDBException thrown if error happens in underlying
- * native library.
- */
- public void dropColumnFamily(final ColumnFamilyHandle columnFamilyHandle)
- throws RocksDBException {
- dropColumnFamily(nativeHandle_, columnFamilyHandle.nativeHandle_);
- }
-
- // Bulk drop column families. This call only records drop records in the
- // manifest and prevents the column families from flushing and compacting.
- // In case of error, the request may succeed partially. User may call
- // ListColumnFamilies to check the result.
- public void dropColumnFamilies(
- final List<ColumnFamilyHandle> columnFamilies) throws RocksDBException {
- final long[] cfHandles = new long[columnFamilies.size()];
- for (int i = 0; i < columnFamilies.size(); i++) {
- cfHandles[i] = columnFamilies.get(i).nativeHandle_;
- }
- dropColumnFamilies(nativeHandle_, cfHandles);
- }
-
- /**
- * <p>Flush all memory table data.</p>
- *
- * <p>Note: it must be ensured that the FlushOptions instance
- * is not GC'ed before this method finishes. If the wait parameter is
- * set to false, flush processing is asynchronous.</p>
+ * instance, or null for the default column family
+ * @param range the range over which to get the memtable stats
*
- * @param flushOptions {@link org.rocksdb.FlushOptions} instance.
- * @throws RocksDBException thrown if an error occurs within the native
- * part of the library.
+ * @return the count and size for the range
*/
- public void flush(final FlushOptions flushOptions)
- throws RocksDBException {
- flush(nativeHandle_, flushOptions.nativeHandle_);
+ public CountAndSize getApproximateMemTableStats(
+ /*@Nullable*/ final ColumnFamilyHandle columnFamilyHandle,
+ final Range range) {
+ final long[] result = getApproximateMemTableStats(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ range.start.getNativeHandle(),
+ range.limit.getNativeHandle());
+ return new CountAndSize(result[0], result[1]);
}
/**
- * <p>Flush all memory table data.</p>
+ * This method is similar to
+ * {@link #getApproximateSizes(ColumnFamilyHandle, List, SizeApproximationFlag...)},
+ * except that it returns the approximate number of records and size in memtables.
*
- * <p>Note: it must be ensured that the FlushOptions instance
- * is not GC'ed before this method finishes. If the wait parameter is
- * set to false, flush processing is asynchronous.</p>
- *
- * @param flushOptions {@link org.rocksdb.FlushOptions} instance.
- * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance.
- * @throws RocksDBException thrown if an error occurs within the native
- * part of the library.
+ * @param range the range over which to get the memtable stats
+ *
+ * @return the count and size for the range
*/
- public void flush(final FlushOptions flushOptions,
- final ColumnFamilyHandle columnFamilyHandle) throws RocksDBException {
- flush(nativeHandle_, flushOptions.nativeHandle_,
- columnFamilyHandle.nativeHandle_);
+ public CountAndSize getApproximateMemTableStats(
+ final Range range) {
+ return getApproximateMemTableStats(null, range);
}
/**
* part of the library.
*/
public void compactRange() throws RocksDBException {
- compactRange0(nativeHandle_, false, -1, 0);
+ compactRange(null);
+ }
+
+ /**
+ * <p>Range compaction of column family.</p>
+ * <p><strong>Note</strong>: After the database has been compacted,
+ * all data will have been pushed down to the last level containing
+ * any data.</p>
+ *
+ * <p><strong>See also</strong></p>
+ * <ul>
+ * <li>
+ * {@link #compactRange(ColumnFamilyHandle, boolean, int, int)}
+ * </li>
+ * <li>
+ * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
+ * </li>
+ * <li>
+ * {@link #compactRange(ColumnFamilyHandle, byte[], byte[],
+ * boolean, int, int)}
+ * </li>
+ * </ul>
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance, or null for the default column family.
+ *
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ public void compactRange(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle)
+ throws RocksDBException {
+ compactRange(nativeHandle_, null, -1, null, -1, 0,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
}
/**
*/
public void compactRange(final byte[] begin, final byte[] end)
throws RocksDBException {
- compactRange0(nativeHandle_, begin, begin.length, end,
- end.length, false, -1, 0);
+ compactRange(null, begin, end);
}
/**
- * <p>Range compaction of database.</p>
+ * <p>Range compaction of column family.</p>
* <p><strong>Note</strong>: After the database has been compacted,
* all data will have been pushed down to the last level containing
* any data.</p>
*
- * <p>Compaction outputs should be placed in options.db_paths
- * [target_path_id]. Behavior is undefined if target_path_id is
- * out of range.</p>
- *
* <p><strong>See also</strong></p>
* <ul>
- * <li>{@link #compactRange()}</li>
- * <li>{@link #compactRange(byte[], byte[])}</li>
- * <li>{@link #compactRange(byte[], byte[], boolean, int, int)}</li>
+ * <li>{@link #compactRange(ColumnFamilyHandle)}</li>
+ * <li>
+ * {@link #compactRange(ColumnFamilyHandle, boolean, int, int)}
+ * </li>
+ * <li>
+ * {@link #compactRange(ColumnFamilyHandle, byte[], byte[],
+ * boolean, int, int)}
+ * </li>
* </ul>
*
- * @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead
- *
- * @param reduce_level reduce level after compaction
- * @param target_level target level to compact to
- * @param target_path_id the target path id of output path
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance, or null for the default column family.
+ * @param begin start of key range (included in range)
+ * @param end end of key range (excluded from range)
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
- @Deprecated
- public void compactRange(final boolean reduce_level,
- final int target_level, final int target_path_id)
- throws RocksDBException {
- compactRange0(nativeHandle_, reduce_level,
- target_level, target_path_id);
+ public void compactRange(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] begin, final byte[] end) throws RocksDBException {
+ compactRange(nativeHandle_,
+ begin, begin == null ? -1 : begin.length,
+ end, end == null ? -1 : end.length,
+ 0, columnFamilyHandle == null ? 0: columnFamilyHandle.nativeHandle_);
}
-
/**
* <p>Range compaction of database.</p>
* <p><strong>Note</strong>: After the database has been compacted,
* <p><strong>See also</strong></p>
* <ul>
* <li>{@link #compactRange()}</li>
- * <li>{@link #compactRange(boolean, int, int)}</li>
* <li>{@link #compactRange(byte[], byte[])}</li>
+ * <li>{@link #compactRange(byte[], byte[], boolean, int, int)}</li>
* </ul>
*
* @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead
*
- * @param begin start of key range (included in range)
- * @param end end of key range (excluded from range)
- * @param reduce_level reduce level after compaction
- * @param target_level target level to compact to
- * @param target_path_id the target path id of output path
+ * @param changeLevel reduce level after compaction
+ * @param targetLevel target level to compact to
+ * @param targetPathId the target path id of output path
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
@Deprecated
- public void compactRange(final byte[] begin, final byte[] end,
- final boolean reduce_level, final int target_level,
- final int target_path_id) throws RocksDBException {
- compactRange0(nativeHandle_, begin, begin.length, end, end.length,
- reduce_level, target_level, target_path_id);
+ public void compactRange(final boolean changeLevel, final int targetLevel,
+ final int targetPathId) throws RocksDBException {
+ compactRange(null, changeLevel, targetLevel, targetPathId);
}
/**
* all data will have been pushed down to the last level containing
* any data.</p>
*
+ * <p>Compaction outputs should be placed in options.db_paths
+ * [target_path_id]. Behavior is undefined if target_path_id is
+ * out of range.</p>
+ *
* <p><strong>See also</strong></p>
* <ul>
- * <li>
- * {@link #compactRange(ColumnFamilyHandle, boolean, int, int)}
- * </li>
+ * <li>{@link #compactRange(ColumnFamilyHandle)}</li>
* <li>
* {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
* </li>
* </li>
* </ul>
*
+ * @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead
+ *
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
- * instance.
+ * instance, or null for the default column family.
+ * @param changeLevel reduce level after compaction
+ * @param targetLevel target level to compact to
+ * @param targetPathId the target path id of output path
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
- public void compactRange(final ColumnFamilyHandle columnFamilyHandle)
+ @Deprecated
+ public void compactRange(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final boolean changeLevel, final int targetLevel, final int targetPathId)
throws RocksDBException {
- compactRange(nativeHandle_, false, -1, 0,
- columnFamilyHandle.nativeHandle_);
+ final CompactRangeOptions options = new CompactRangeOptions();
+ options.setChangeLevel(changeLevel);
+ options.setTargetLevel(targetLevel);
+ options.setTargetPathId(targetPathId);
+ compactRange(nativeHandle_,
+ null, -1,
+ null, -1,
+ options.nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * <p>Range compaction of database.</p>
+ * <p><strong>Note</strong>: After the database has been compacted,
+ * all data will have been pushed down to the last level containing
+ * any data.</p>
+ *
+ * <p>Compaction outputs should be placed in options.db_paths
+ * [target_path_id]. Behavior is undefined if target_path_id is
+ * out of range.</p>
+ *
+ * <p><strong>See also</strong></p>
+ * <ul>
+ * <li>{@link #compactRange()}</li>
+ * <li>{@link #compactRange(boolean, int, int)}</li>
+ * <li>{@link #compactRange(byte[], byte[])}</li>
+ * </ul>
+ *
+ * @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)}
+ * instead
+ *
+ * @param begin start of key range (included in range)
+ * @param end end of key range (excluded from range)
+ * @param changeLevel reduce level after compaction
+ * @param targetLevel target level to compact to
+ * @param targetPathId the target path id of output path
+ *
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ @Deprecated
+ public void compactRange(final byte[] begin, final byte[] end,
+ final boolean changeLevel, final int targetLevel,
+ final int targetPathId) throws RocksDBException {
+ compactRange(null, begin, end, changeLevel, targetLevel, targetPathId);
}
/**
* all data will have been pushed down to the last level containing
* any data.</p>
*
+ * <p>Compaction outputs should be placed in options.db_paths
+ * [target_path_id]. Behavior is undefined if target_path_id is
+ * out of range.</p>
+ *
* <p><strong>See also</strong></p>
* <ul>
* <li>{@link #compactRange(ColumnFamilyHandle)}</li>
* {@link #compactRange(ColumnFamilyHandle, boolean, int, int)}
* </li>
* <li>
- * {@link #compactRange(ColumnFamilyHandle, byte[], byte[],
- * boolean, int, int)}
+ * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
* </li>
* </ul>
*
+ * @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead
+ *
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
* instance.
* @param begin start of key range (included in range)
* @param end end of key range (excluded from range)
+ * @param changeLevel reduce level after compaction
+ * @param targetLevel target level to compact to
+ * @param targetPathId the target path id of output path
*
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
- public void compactRange(final ColumnFamilyHandle columnFamilyHandle,
- final byte[] begin, final byte[] end) throws RocksDBException {
- compactRange(nativeHandle_, begin, begin.length, end, end.length,
- false, -1, 0, columnFamilyHandle.nativeHandle_);
+ @Deprecated
+ public void compactRange(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] begin, final byte[] end, final boolean changeLevel,
+ final int targetLevel, final int targetPathId)
+ throws RocksDBException {
+ final CompactRangeOptions options = new CompactRangeOptions();
+ options.setChangeLevel(changeLevel);
+ options.setTargetLevel(targetLevel);
+ options.setTargetPathId(targetPathId);
+ compactRange(nativeHandle_,
+ begin, begin == null ? -1 : begin.length,
+ end, end == null ? -1 : end.length,
+ options.nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
}
-
/**
* <p>Range compaction of column family.</p>
* <p><strong>Note</strong>: After the database has been compacted,
* part of the library.
*/
public void compactRange(final ColumnFamilyHandle columnFamilyHandle,
- final byte[] begin, final byte[] end, CompactRangeOptions compactRangeOptions) throws RocksDBException {
- compactRange(nativeHandle_, begin, begin.length, end, end.length,
- compactRangeOptions.nativeHandle_, columnFamilyHandle.nativeHandle_);
+ final byte[] begin, final byte[] end,
+ final CompactRangeOptions compactRangeOptions) throws RocksDBException {
+ compactRange(nativeHandle_,
+ begin, begin == null ? -1 : begin.length,
+ end, end == null ? -1 : end.length,
+ compactRangeOptions.nativeHandle_, columnFamilyHandle.nativeHandle_);
}
/**
- * <p>Range compaction of column family.</p>
- * <p><strong>Note</strong>: After the database has been compacted,
- * all data will have been pushed down to the last level containing
- * any data.</p>
+ * Change the options for the column family handle.
*
- * <p>Compaction outputs should be placed in options.db_paths
- * [target_path_id]. Behavior is undefined if target_path_id is
- * out of range.</p>
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance.
+ * @param mutableColumnFamilyOptions the options.
+ */
+ public void setOptions(
+ /* @Nullable */final ColumnFamilyHandle columnFamilyHandle,
+ final MutableColumnFamilyOptions mutableColumnFamilyOptions)
+ throws RocksDBException {
+ setOptions(nativeHandle_, columnFamilyHandle.nativeHandle_,
+ mutableColumnFamilyOptions.getKeys(),
+ mutableColumnFamilyOptions.getValues());
+ }
+
+ /**
+ * Change the options for the default column family handle.
*
- * <p><strong>See also</strong></p>
- * <ul>
- * <li>{@link #compactRange(ColumnFamilyHandle)}</li>
- * <li>
- * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
- * </li>
- * <li>
- * {@link #compactRange(ColumnFamilyHandle, byte[], byte[],
- * boolean, int, int)}
- * </li>
- * </ul>
+ * @param mutableColumnFamilyOptions the options.
+ */
+ public void setOptions(
+ final MutableColumnFamilyOptions mutableColumnFamilyOptions)
+ throws RocksDBException {
+ setOptions(null, mutableColumnFamilyOptions);
+ }
+
+ /**
+ * Change the options for the DB.
*
- * @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead
+ * @param mutableDBoptions the options.
+ */
+ public void setDBOptions(final MutableDBOptions mutableDBoptions)
+ throws RocksDBException {
+ setDBOptions(nativeHandle_,
+ mutableDBoptions.getKeys(),
+ mutableDBoptions.getValues());
+ }
+
+ /**
+ * Takes a list of files specified by file names and
+ * compacts them to the specified level.
+ *
+ * Note that the behavior is different from
+ * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
+ * in that CompactFiles() performs the compaction job using the CURRENT
+ * thread.
+ *
+ * @param compactionOptions compaction options
+ * @param inputFileNames the name of the files to compact
+ * @param outputLevel the level to which they should be compacted
+ * @param outputPathId the id of the output path, or -1
+ * @param compactionJobInfo the compaction job info, this parameter
+ * will be updated with the info from compacting the files,
+ * can just be null if you don't need it.
+ */
+ public List<String> compactFiles(
+ final CompactionOptions compactionOptions,
+ final List<String> inputFileNames,
+ final int outputLevel,
+ final int outputPathId,
+ /* @Nullable */ final CompactionJobInfo compactionJobInfo)
+ throws RocksDBException {
+ return compactFiles(compactionOptions, null, inputFileNames, outputLevel,
+ outputPathId, compactionJobInfo);
+ }
+
+ /**
+ * Takes a list of files specified by file names and
+ * compacts them to the specified level.
+ *
+ * Note that the behavior is different from
+ * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
+ * in that CompactFiles() performs the compaction job using the CURRENT
+ * thread.
+ *
+ * @param compactionOptions compaction options
+ * @param columnFamilyHandle columnFamilyHandle, or null for the
+ * default column family
+ * @param inputFileNames the name of the files to compact
+ * @param outputLevel the level to which they should be compacted
+ * @param outputPathId the id of the output path, or -1
+ * @param compactionJobInfo the compaction job info, this parameter
+ * will be updated with the info from compacting the files,
+ * can just be null if you don't need it.
+ */
+ public List<String> compactFiles(
+ final CompactionOptions compactionOptions,
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final List<String> inputFileNames,
+ final int outputLevel,
+ final int outputPathId,
+ /* @Nullable */ final CompactionJobInfo compactionJobInfo)
+ throws RocksDBException {
+ return Arrays.asList(compactFiles(nativeHandle_, compactionOptions.nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ inputFileNames.toArray(new String[0]),
+ outputLevel,
+ outputPathId,
+ compactionJobInfo == null ? 0 : compactionJobInfo.nativeHandle_));
+ }
+
+ /**
+ * This function will wait until all currently running background processes
+ * finish. After it returns, no background process will be run until
+ * {@link #continueBackgroundWork()} is called
*
- * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
- * instance.
- * @param reduce_level reduce level after compaction
- * @param target_level target level to compact to
- * @param target_path_id the target path id of output path
+ * @throws RocksDBException If an error occurs when pausing background work
+ */
+ public void pauseBackgroundWork() throws RocksDBException {
+ pauseBackgroundWork(nativeHandle_);
+ }
+
+ /**
+ * Resumes background work which was suspended by
+ * previously calling {@link #pauseBackgroundWork()}
+ *
+ * @throws RocksDBException If an error occurs when resuming background work
+ */
+ public void continueBackgroundWork() throws RocksDBException {
+ continueBackgroundWork(nativeHandle_);
+ }
+
+ /**
+ * Enable automatic compactions for the given column
+ * families if they were previously disabled.
+ *
+ * The function will first set the
+ * {@link ColumnFamilyOptions#disableAutoCompactions()} option for each
+ * column family to false, after which it will schedule a flush/compaction.
+ *
+ * NOTE: Setting disableAutoCompactions to 'false' through
+ * {@link #setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
+ * does NOT schedule a flush/compaction afterwards, and only changes the
+ * parameter itself within the column family option.
+ *
+ * @param columnFamilyHandles the column family handles
+ */
+ public void enableAutoCompaction(
+ final List<ColumnFamilyHandle> columnFamilyHandles)
+ throws RocksDBException {
+ enableAutoCompaction(nativeHandle_,
+ toNativeHandleList(columnFamilyHandles));
+ }
+
+ /**
+ * Number of levels used for this DB.
+ *
+ * @return the number of levels
+ */
+ public int numberLevels() {
+ return numberLevels(null);
+ }
+
+ /**
+ * Number of levels used for a column family in this DB.
+ *
+ * @param columnFamilyHandle the column family handle, or null
+ * for the default column family
+ *
+ * @return the number of levels
+ */
+ public int numberLevels(/* @Nullable */final ColumnFamilyHandle columnFamilyHandle) {
+ return numberLevels(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Maximum level to which a new compacted memtable is pushed if it
+ * does not create overlap.
+ */
+ public int maxMemCompactionLevel() {
+ return maxMemCompactionLevel(null);
+ }
+
+ /**
+ * Maximum level to which a new compacted memtable is pushed if it
+ * does not create overlap.
+ *
+ * @param columnFamilyHandle the column family handle
+ */
+ public int maxMemCompactionLevel(
+ /* @Nullable */final ColumnFamilyHandle columnFamilyHandle) {
+ return maxMemCompactionLevel(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Number of files in level-0 that would stop writes.
+ */
+ public int level0StopWriteTrigger() {
+ return level0StopWriteTrigger(null);
+ }
+
+ /**
+ * Number of files in level-0 that would stop writes.
+ *
+ * @param columnFamilyHandle the column family handle
+ */
+ public int level0StopWriteTrigger(
+ /* @Nullable */final ColumnFamilyHandle columnFamilyHandle) {
+ return level0StopWriteTrigger(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Get DB name -- the exact same name that was provided as the path
+ * argument to {@link #open(Options, String)}.
+ *
+ * @return the DB name
+ */
+ public String getName() {
+ return getName(nativeHandle_);
+ }
+
+ /**
+ * Get the Env object from the DB
+ *
+ * @return the env
+ */
+ public Env getEnv() {
+ final long envHandle = getEnv(nativeHandle_);
+ if (envHandle == Env.getDefault().nativeHandle_) {
+ return Env.getDefault();
+ } else {
+ final Env env = new RocksEnv(envHandle);
+ env.disOwnNativeHandle(); // we do not own the Env!
+ return env;
+ }
+ }
+
+ /**
+ * <p>Flush all memory table data.</p>
+ *
+ * <p>Note: it must be ensured that the FlushOptions instance
+ * is not GC'ed before this method finishes. If the wait parameter is
+ * set to false, flush processing is asynchronous.</p>
+ *
+ * @param flushOptions {@link org.rocksdb.FlushOptions} instance.
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ public void flush(final FlushOptions flushOptions)
+ throws RocksDBException {
+ flush(flushOptions, (List<ColumnFamilyHandle>) null);
+ }
+
+ /**
+ * <p>Flush all memory table data.</p>
+ *
+ * <p>Note: it must be ensured that the FlushOptions instance
+ * is not GC'ed before this method finishes. If the wait parameter is
+ * set to false, flush processing is asynchronous.</p>
*
+ * @param flushOptions {@link org.rocksdb.FlushOptions} instance.
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance.
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
- @Deprecated
- public void compactRange(final ColumnFamilyHandle columnFamilyHandle,
- final boolean reduce_level, final int target_level,
- final int target_path_id) throws RocksDBException {
- compactRange(nativeHandle_, reduce_level, target_level,
- target_path_id, columnFamilyHandle.nativeHandle_);
+ public void flush(final FlushOptions flushOptions,
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle)
+ throws RocksDBException {
+ flush(flushOptions,
+ columnFamilyHandle == null ? null : Arrays.asList(columnFamilyHandle));
}
/**
- * <p>Range compaction of column family.</p>
- * <p><strong>Note</strong>: After the database has been compacted,
- * all data will have been pushed down to the last level containing
- * any data.</p>
- *
- * <p>Compaction outputs should be placed in options.db_paths
- * [target_path_id]. Behavior is undefined if target_path_id is
- * out of range.</p>
- *
- * <p><strong>See also</strong></p>
- * <ul>
- * <li>{@link #compactRange(ColumnFamilyHandle)}</li>
- * <li>
- * {@link #compactRange(ColumnFamilyHandle, boolean, int, int)}
- * </li>
- * <li>
- * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
- * </li>
- * </ul>
+ * Flushes multiple column families.
*
- * @deprecated Use {@link #compactRange(ColumnFamilyHandle, byte[], byte[], CompactRangeOptions)} instead
+ * If atomic flush is not enabled, this is equivalent to calling
+ * {@link #flush(FlushOptions, ColumnFamilyHandle)} multiple times.
*
- * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
- * instance.
- * @param begin start of key range (included in range)
- * @param end end of key range (excluded from range)
- * @param reduce_level reduce level after compaction
- * @param target_level target level to compact to
- * @param target_path_id the target path id of output path
+ * If atomic flush is enabled, this will flush all column families
+ * specified up to the latest sequence number at the time when flush is
+ * requested.
*
+ * @param flushOptions {@link org.rocksdb.FlushOptions} instance.
+ * @param columnFamilyHandles column family handles.
* @throws RocksDBException thrown if an error occurs within the native
* part of the library.
*/
- @Deprecated
- public void compactRange(final ColumnFamilyHandle columnFamilyHandle,
- final byte[] begin, final byte[] end, final boolean reduce_level,
- final int target_level, final int target_path_id)
+ public void flush(final FlushOptions flushOptions,
+ /* @Nullable */ final List<ColumnFamilyHandle> columnFamilyHandles)
throws RocksDBException {
- compactRange(nativeHandle_, begin, begin.length, end, end.length,
- reduce_level, target_level, target_path_id,
- columnFamilyHandle.nativeHandle_);
+ flush(nativeHandle_, flushOptions.nativeHandle_,
+ toNativeHandleList(columnFamilyHandles));
}
/**
- * This function will wait until all currently running background processes
- * finish. After it returns, no background process will be run until
- * {@link #continueBackgroundWork()} is called
+ * Flush the WAL memory buffer to the file. If {@code sync} is true,
+ * it calls {@link #syncWal()} afterwards.
*
- * @throws RocksDBException If an error occurs when pausing background work
+ * @param sync true to also fsync to disk.
*/
- public void pauseBackgroundWork() throws RocksDBException {
- pauseBackgroundWork(nativeHandle_);
+ public void flushWal(final boolean sync) throws RocksDBException {
+ flushWal(nativeHandle_, sync);
}
/**
- * Resumes backround work which was suspended by
- * previously calling {@link #pauseBackgroundWork()}
+ * Sync the WAL.
*
- * @throws RocksDBException If an error occurs when resuming background work
+ * Note that {@link #write(WriteOptions, WriteBatch)} followed by
+ * {@link #syncWal()} is not exactly the same as
+ * {@link #write(WriteOptions, WriteBatch)} with
+ * {@link WriteOptions#sync()} set to true; In the latter case the changes
+ * won't be visible until the sync is done.
+ *
+ * Currently only works if {@link Options#allowMmapWrites()} is set to false.
*/
- public void continueBackgroundWork() throws RocksDBException {
- continueBackgroundWork(nativeHandle_);
+ public void syncWal() throws RocksDBException {
+ syncWal(nativeHandle_);
}
/**
return getLatestSequenceNumber(nativeHandle_);
}
+ /**
+ * Instructs DB to preserve deletes with sequence numbers >= sequenceNumber.
+ *
+ * Has no effect if DBOptions#preserveDeletes() is set to false.
+ *
+ * This function assumes that user calls this function with monotonically
+ * increasing seqnums (otherwise we can't guarantee that a particular delete
+ * hasn't been already processed).
+ *
+ * @param sequenceNumber the minimum sequence number to preserve
+ *
+ * @return true if the value was successfully updated,
+ * false if user attempted to call it with
+ * sequenceNumber <= current value.
+ */
+ public boolean setPreserveDeletesSequenceNumber(final long sequenceNumber) {
+ return setPreserveDeletesSequenceNumber(nativeHandle_, sequenceNumber);
+ }
+
/**
* <p>Prevent file deletions. Compactions will continue to occur,
* but no obsolete files will be deleted. Calling this multiple
enableFileDeletions(nativeHandle_, force);
}
+ public static class LiveFiles {
+ /**
+ * The valid size of the manifest file. The manifest file is an ever growing
+ * file, but only the portion specified here is valid for this snapshot.
+ */
+ public final long manifestFileSize;
+
+ /**
+ * The files are relative to the {@link #getName()} and are not
+ * absolute paths. Despite being relative paths, the file names begin
+ * with "/".
+ */
+ public final List<String> files;
+
+ LiveFiles(final long manifestFileSize, final List<String> files) {
+ this.manifestFileSize = manifestFileSize;
+ this.files = files;
+ }
+ }
+
+ /**
+ * Retrieve the list of all files in the database after flushing the memtable.
+ *
+ * See {@link #getLiveFiles(boolean)}.
+ *
+ * @return the live files
+ */
+ public LiveFiles getLiveFiles() throws RocksDBException {
+ return getLiveFiles(true);
+ }
+
+ /**
+ * Retrieve the list of all files in the database.
+ *
+ * In case you have multiple column families, even if {@code flushMemtable}
+ * is true, you still need to call {@link #getSortedWalFiles()}
+ * after {@link #getLiveFiles(boolean)} to compensate for new data that
+ * arrived to already-flushed column families while other column families
+ * were flushing.
+ *
+ * NOTE: Calling {@link #getLiveFiles(boolean)} followed by
+ * {@link #getSortedWalFiles()} can generate a lossless backup.
+ *
+ * @param flushMemtable set to true to flush before recording the live
+ * files. Setting to false is useful when we don't want to wait for flush
+ * which may have to wait for compaction to complete taking an
+ * indeterminate time.
+ *
+ * @return the live files
+ */
+ public LiveFiles getLiveFiles(final boolean flushMemtable)
+ throws RocksDBException {
+ final String[] result = getLiveFiles(nativeHandle_, flushMemtable);
+ if (result == null) {
+ return null;
+ }
+ final String[] files = Arrays.copyOf(result, result.length - 1);
+ final long manifestFileSize = Long.parseLong(result[result.length - 1]);
+
+ return new LiveFiles(manifestFileSize, Arrays.asList(files));
+ }
+
+ /**
+ * Retrieve the sorted list of all wal files with earliest file first.
+ *
+ * @return the log files
+ */
+ public List<LogFile> getSortedWalFiles() throws RocksDBException {
+ final LogFile[] logFiles = getSortedWalFiles(nativeHandle_);
+ return Arrays.asList(logFiles);
+ }
+
/**
* <p>Returns an iterator that is positioned at a write-batch containing
* seq_number. If the sequence number is non existent, it returns an iterator
getUpdatesSince(nativeHandle_, sequenceNumber));
}
- public void setOptions(final ColumnFamilyHandle columnFamilyHandle,
- final MutableColumnFamilyOptions mutableColumnFamilyOptions)
- throws RocksDBException {
- setOptions(nativeHandle_, columnFamilyHandle.nativeHandle_,
- mutableColumnFamilyOptions.getKeys(),
- mutableColumnFamilyOptions.getValues());
+ /**
+ * Delete the file name from the db directory and update the internal state to
+ * reflect that. Supports deletion of sst and log files only. 'name' must be
+ * path relative to the db directory. eg. 000001.sst, /archive/000003.log
+ *
+ * @param name the file name
+ */
+ public void deleteFile(final String name) throws RocksDBException {
+ deleteFile(nativeHandle_, name);
}
- private long[] toNativeHandleList(final List<? extends RocksObject> objectList) {
- final int len = objectList.size();
- final long[] handleList = new long[len];
- for (int i = 0; i < len; i++) {
- handleList[i] = objectList.get(i).nativeHandle_;
- }
- return handleList;
+ /**
+ * Gets a list of all table files metadata.
+ *
+ * @return table files metadata.
+ */
+ public List<LiveFileMetaData> getLiveFilesMetaData() {
+ return Arrays.asList(getLiveFilesMetaData(nativeHandle_));
+ }
+
+ /**
+ * Obtains the meta data of the specified column family of the DB.
+ *
+ * @param columnFamilyHandle the column family
+ *
+ * @return the column family metadata
+ */
+ public ColumnFamilyMetaData getColumnFamilyMetaData(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle) {
+ return getColumnFamilyMetaData(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Obtains the meta data of the default column family of the DB.
+ *
+ * @return the column family metadata
+ */
+ public ColumnFamilyMetaData GetColumnFamilyMetaData() {
+ return getColumnFamilyMetaData(null);
}
/**
final IngestExternalFileOptions ingestExternalFileOptions)
throws RocksDBException {
ingestExternalFile(nativeHandle_, getDefaultColumnFamily().nativeHandle_,
- filePathList.toArray(new String[filePathList.size()]),
+ filePathList.toArray(new String[0]),
filePathList.size(), ingestExternalFileOptions.nativeHandle_);
}
final IngestExternalFileOptions ingestExternalFileOptions)
throws RocksDBException {
ingestExternalFile(nativeHandle_, columnFamilyHandle.nativeHandle_,
- filePathList.toArray(new String[filePathList.size()]),
+ filePathList.toArray(new String[0]),
filePathList.size(), ingestExternalFileOptions.nativeHandle_);
}
+ /**
+ * Verify checksum
+ *
+ * @throws RocksDBException if the checksum is not valid
+ */
+ public void verifyChecksum() throws RocksDBException {
+ verifyChecksum(nativeHandle_);
+ }
+
+ /**
+ * Gets the handle for the default column family
+ *
+ * @return The handle of the default column family
+ */
+ public ColumnFamilyHandle getDefaultColumnFamily() {
+ final ColumnFamilyHandle cfHandle = new ColumnFamilyHandle(this,
+ getDefaultColumnFamily(nativeHandle_));
+ cfHandle.disOwnNativeHandle();
+ return cfHandle;
+ }
+
+ /**
+ * Get the properties of all tables.
+ *
+ * @param columnFamilyHandle the column family handle, or null for the default
+ * column family.
+ *
+ * @return the properties
+ */
+ public Map<String, TableProperties> getPropertiesOfAllTables(
+ /* @Nullable */final ColumnFamilyHandle columnFamilyHandle)
+ throws RocksDBException {
+ return getPropertiesOfAllTables(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Get the properties of all tables in the default column family.
+ *
+ * @return the properties
+ */
+ public Map<String, TableProperties> getPropertiesOfAllTables()
+ throws RocksDBException {
+ return getPropertiesOfAllTables(null);
+ }
+
+ /**
+ * Get the properties of tables in range.
+ *
+ * @param columnFamilyHandle the column family handle, or null for the default
+ * column family.
+ * @param ranges the ranges over which to get the table properties
+ *
+ * @return the properties
+ */
+ public Map<String, TableProperties> getPropertiesOfTablesInRange(
+ /* @Nullable */final ColumnFamilyHandle columnFamilyHandle,
+ final List<Range> ranges) throws RocksDBException {
+ return getPropertiesOfTablesInRange(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ toRangeSliceHandles(ranges));
+ }
+
+ /**
+ * Get the properties of tables in range for the default column family.
+ *
+ * @param ranges the ranges over which to get the table properties
+ *
+ * @return the properties
+ */
+ public Map<String, TableProperties> getPropertiesOfTablesInRange(
+ final List<Range> ranges) throws RocksDBException {
+ return getPropertiesOfTablesInRange(null, ranges);
+ }
+
+ /**
+ * Suggest the range to compact.
+ *
+ * @param columnFamilyHandle the column family handle, or null for the default
+ * column family.
+ *
+ * @return the suggested range.
+ */
+ public Range suggestCompactRange(
+ /* @Nullable */final ColumnFamilyHandle columnFamilyHandle)
+ throws RocksDBException {
+ final long[] rangeSliceHandles = suggestCompactRange(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ return new Range(new Slice(rangeSliceHandles[0]),
+ new Slice(rangeSliceHandles[1]));
+ }
+
+ /**
+ * Suggest the range to compact for the default column family.
+ *
+ * @return the suggested range.
+ */
+ public Range suggestCompactRange()
+ throws RocksDBException {
+ return suggestCompactRange(null);
+ }
+
+ /**
+ * Promote L0.
+ *
+ * @param columnFamilyHandle the column family handle,
+ * or null for the default column family.
+ */
+ public void promoteL0(
+ /* @Nullable */final ColumnFamilyHandle columnFamilyHandle,
+ final int targetLevel) throws RocksDBException {
+ promoteL0(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ targetLevel);
+ }
+
+ /**
+ * Promote L0 for the default column family.
+ */
+ public void promoteL0(final int targetLevel)
+ throws RocksDBException {
+ promoteL0(null, targetLevel);
+ }
+
+ /**
+ * Trace DB operations.
+ *
+ * Use {@link #endTrace()} to stop tracing.
+ *
+ * @param traceOptions the options
+ * @param traceWriter the trace writer
+ */
+ public void startTrace(final TraceOptions traceOptions,
+ final AbstractTraceWriter traceWriter) throws RocksDBException {
+ startTrace(nativeHandle_, traceOptions.getMaxTraceFileSize(),
+ traceWriter.nativeHandle_);
+ /**
+ * NOTE: {@link #startTrace(long, long, long)} transfers the ownership
+ * from Java to C++, so we must disown the native handle here.
+ */
+ traceWriter.disOwnNativeHandle();
+ }
+
+ /**
+ * Stop tracing DB operations.
+ *
+ * See {@link #startTrace(TraceOptions, AbstractTraceWriter)}
+ */
+ public void endTrace() throws RocksDBException {
+ endTrace(nativeHandle_);
+ }
+
/**
* Static method to destroy the contents of the specified database.
* Be very careful using this method.
destroyDB(path, options.nativeHandle_);
}
- /**
- * Private constructor.
- *
- * @param nativeHandle The native handle of the C++ RocksDB object
- */
- protected RocksDB(final long nativeHandle) {
- super(nativeHandle);
+ private /* @Nullable */ long[] toNativeHandleList(
+ /* @Nullable */ final List<? extends RocksObject> objectList) {
+ if (objectList == null) {
+ return null;
+ }
+ final int len = objectList.size();
+ final long[] handleList = new long[len];
+ for (int i = 0; i < len; i++) {
+ handleList[i] = objectList.get(i).nativeHandle_;
+ }
+ return handleList;
+ }
+
+ private static long[] toRangeSliceHandles(final List<Range> ranges) {
+ final long rangeSliceHandles[] = new long [ranges.size() * 2];
+ for (int i = 0, j = 0; i < ranges.size(); i++) {
+ final Range range = ranges.get(i);
+ rangeSliceHandles[j++] = range.start.getNativeHandle();
+ rangeSliceHandles[j++] = range.limit.getNativeHandle();
+ }
+ return rangeSliceHandles;
+ }
+
+ protected void storeOptionsInstance(DBOptionsInterface options) {
+ options_ = options;
+ }
+
+ private static void checkBounds(int offset, int len, int size) {
+ if ((offset | len | (offset + len) | (size - (offset + len))) < 0) {
+ throw new IndexOutOfBoundsException(String.format("offset(%d), len(%d), size(%d)", offset, len, size));
+ }
+ }
+
+ private static int computeCapacityHint(final int estimatedNumberOfItems) {
+ // Default load factor for HashMap is 0.75, so N * 1.5 will be at the load
+ // limit. We add +1 for a buffer.
+ return (int)Math.ceil(estimatedNumberOfItems * 1.5 + 1.0);
}
// native methods
- protected native static long open(final long optionsHandle,
+ private native static long open(final long optionsHandle,
final String path) throws RocksDBException;
/**
*
* @throws RocksDBException thrown if the database could not be opened
*/
- protected native static long[] open(final long optionsHandle,
+ private native static long[] open(final long optionsHandle,
final String path, final byte[][] columnFamilyNames,
final long[] columnFamilyOptions) throws RocksDBException;
- protected native static long openROnly(final long optionsHandle,
+ private native static long openROnly(final long optionsHandle,
final String path) throws RocksDBException;
/**
*
* @throws RocksDBException thrown if the database could not be opened
*/
- protected native static long[] openROnly(final long optionsHandle,
+ private native static long[] openROnly(final long optionsHandle,
final String path, final byte[][] columnFamilyNames,
final long[] columnFamilyOptions
) throws RocksDBException;
- protected native static byte[][] listColumnFamilies(long optionsHandle,
- String path) throws RocksDBException;
- protected native void put(long handle, byte[] key, int keyOffset,
- int keyLength, byte[] value, int valueOffset, int valueLength)
+ @Override protected native void disposeInternal(final long handle);
+
+ private native static void closeDatabase(final long handle)
+ throws RocksDBException;
+ private native static byte[][] listColumnFamilies(final long optionsHandle,
+ final String path) throws RocksDBException;
+ private native long createColumnFamily(final long handle,
+ final byte[] columnFamilyName, final int columnFamilyNamelen,
+ final long columnFamilyOptions) throws RocksDBException;
+ private native long[] createColumnFamilies(final long handle,
+ final long columnFamilyOptionsHandle, final byte[][] columnFamilyNames)
+ throws RocksDBException;
+ private native long[] createColumnFamilies(final long handle,
+ final long columnFamilyOptionsHandles[], final byte[][] columnFamilyNames)
+ throws RocksDBException;
+ private native void dropColumnFamily(
+ final long handle, final long cfHandle) throws RocksDBException;
+ private native void dropColumnFamilies(final long handle,
+ final long[] cfHandles) throws RocksDBException;
+ //TODO(AR) best way to express DestroyColumnFamilyHandle? ...maybe in ColumnFamilyHandle?
+ private native void put(final long handle, final byte[] key,
+ final int keyOffset, final int keyLength, final byte[] value,
+ final int valueOffset, int valueLength) throws RocksDBException;
+ private native void put(final long handle, final byte[] key, final int keyOffset,
+ final int keyLength, final byte[] value, final int valueOffset,
+ final int valueLength, final long cfHandle) throws RocksDBException;
+ private native void put(final long handle, final long writeOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength,
+ final byte[] value, final int valueOffset, final int valueLength)
+ throws RocksDBException;
+ private native void put(final long handle, final long writeOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength,
+ final byte[] value, final int valueOffset, final int valueLength,
+ final long cfHandle) throws RocksDBException;
+ private native void delete(final long handle, final byte[] key,
+ final int keyOffset, final int keyLength) throws RocksDBException;
+ private native void delete(final long handle, final byte[] key,
+ final int keyOffset, final int keyLength, final long cfHandle)
+ throws RocksDBException;
+ private native void delete(final long handle, final long writeOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength)
+ throws RocksDBException;
+ private native void delete(final long handle, final long writeOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength,
+ final long cfHandle) throws RocksDBException;
+ private native void singleDelete(
+ final long handle, final byte[] key, final int keyLen)
+ throws RocksDBException;
+ private native void singleDelete(
+ final long handle, final byte[] key, final int keyLen,
+ final long cfHandle) throws RocksDBException;
+ private native void singleDelete(
+ final long handle, final long writeOptHandle, final byte[] key,
+ final int keyLen) throws RocksDBException;
+ private native void singleDelete(
+ final long handle, final long writeOptHandle,
+ final byte[] key, final int keyLen, final long cfHandle)
+ throws RocksDBException;
+ private native void deleteRange(final long handle, final byte[] beginKey,
+ final int beginKeyOffset, final int beginKeyLength, final byte[] endKey,
+ final int endKeyOffset, final int endKeyLength) throws RocksDBException;
+ private native void deleteRange(final long handle, final byte[] beginKey,
+ final int beginKeyOffset, final int beginKeyLength, final byte[] endKey,
+ final int endKeyOffset, final int endKeyLength, final long cfHandle)
+ throws RocksDBException;
+ private native void deleteRange(final long handle, final long writeOptHandle,
+ final byte[] beginKey, final int beginKeyOffset, final int beginKeyLength,
+ final byte[] endKey, final int endKeyOffset, final int endKeyLength)
throws RocksDBException;
- protected native void put(long handle, byte[] key, int keyOffset,
- int keyLength, byte[] value, int valueOffset, int valueLength,
- long cfHandle) throws RocksDBException;
- protected native void put(long handle, long writeOptHandle, byte[] key,
- int keyOffset, int keyLength, byte[] value, int valueOffset,
- int valueLength) throws RocksDBException;
- protected native void put(long handle, long writeOptHandle, byte[] key,
- int keyOffset, int keyLength, byte[] value, int valueOffset,
- int valueLength, long cfHandle) throws RocksDBException;
- protected native void write0(final long handle, long writeOptHandle,
- long wbHandle) throws RocksDBException;
- protected native void write1(final long handle, long writeOptHandle,
- long wbwiHandle) throws RocksDBException;
- protected native boolean keyMayExist(final long handle, final byte[] key,
+ private native void deleteRange(
+ final long handle, final long writeOptHandle, final byte[] beginKey,
+ final int beginKeyOffset, final int beginKeyLength, final byte[] endKey,
+ final int endKeyOffset, final int endKeyLength, final long cfHandle)
+ throws RocksDBException;
+ private native void merge(final long handle, final byte[] key,
+ final int keyOffset, final int keyLength, final byte[] value,
+ final int valueOffset, final int valueLength) throws RocksDBException;
+ private native void merge(final long handle, final byte[] key,
+ final int keyOffset, final int keyLength, final byte[] value,
+ final int valueOffset, final int valueLength, final long cfHandle)
+ throws RocksDBException;
+ private native void merge(final long handle, final long writeOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength,
+ final byte[] value, final int valueOffset, final int valueLength)
+ throws RocksDBException;
+ private native void merge(final long handle, final long writeOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength,
+ final byte[] value, final int valueOffset, final int valueLength,
+ final long cfHandle) throws RocksDBException;
+ private native void write0(final long handle, final long writeOptHandle,
+ final long wbHandle) throws RocksDBException;
+ private native void write1(final long handle, final long writeOptHandle,
+ final long wbwiHandle) throws RocksDBException;
+ private native int get(final long handle, final byte[] key,
+ final int keyOffset, final int keyLength, final byte[] value,
+ final int valueOffset, final int valueLength) throws RocksDBException;
+ private native int get(final long handle, final byte[] key,
+ final int keyOffset, final int keyLength, byte[] value,
+ final int valueOffset, final int valueLength, final long cfHandle)
+ throws RocksDBException;
+ private native int get(final long handle, final long readOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength,
+ final byte[] value, final int valueOffset, final int valueLength)
+ throws RocksDBException;
+ private native int get(final long handle, final long readOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength,
+ final byte[] value, final int valueOffset, final int valueLength,
+ final long cfHandle) throws RocksDBException;
+ private native byte[] get(final long handle, byte[] key, final int keyOffset,
+ final int keyLength) throws RocksDBException;
+ private native byte[] get(final long handle, final byte[] key,
+ final int keyOffset, final int keyLength, final long cfHandle)
+ throws RocksDBException;
+ private native byte[] get(final long handle, final long readOptHandle,
+ final byte[] key, final int keyOffset, final int keyLength)
+ throws RocksDBException;
+ private native byte[] get(final long handle,
+ final long readOptHandle, final byte[] key, final int keyOffset,
+ final int keyLength, final long cfHandle) throws RocksDBException;
+ private native byte[][] multiGet(final long dbHandle, final byte[][] keys,
+ final int[] keyOffsets, final int[] keyLengths);
+ private native byte[][] multiGet(final long dbHandle, final byte[][] keys,
+ final int[] keyOffsets, final int[] keyLengths,
+ final long[] columnFamilyHandles);
+ private native byte[][] multiGet(final long dbHandle, final long rOptHandle,
+ final byte[][] keys, final int[] keyOffsets, final int[] keyLengths);
+ private native byte[][] multiGet(final long dbHandle, final long rOptHandle,
+ final byte[][] keys, final int[] keyOffsets, final int[] keyLengths,
+ final long[] columnFamilyHandles);
+ private native boolean keyMayExist(final long handle, final byte[] key,
final int keyOffset, final int keyLength,
final StringBuilder stringBuilder);
- protected native boolean keyMayExist(final long handle, final byte[] key,
+ private native boolean keyMayExist(final long handle, final byte[] key,
final int keyOffset, final int keyLength, final long cfHandle,
final StringBuilder stringBuilder);
- protected native boolean keyMayExist(final long handle,
+ private native boolean keyMayExist(final long handle,
final long optionsHandle, final byte[] key, final int keyOffset,
final int keyLength, final StringBuilder stringBuilder);
- protected native boolean keyMayExist(final long handle,
+ private native boolean keyMayExist(final long handle,
final long optionsHandle, final byte[] key, final int keyOffset,
final int keyLength, final long cfHandle,
final StringBuilder stringBuilder);
- protected native void merge(long handle, byte[] key, int keyOffset,
- int keyLength, byte[] value, int valueOffset, int valueLength)
+ private native long iterator(final long handle);
+ private native long iterator(final long handle, final long readOptHandle);
+ private native long iteratorCF(final long handle, final long cfHandle);
+ private native long iteratorCF(final long handle, final long cfHandle,
+ final long readOptHandle);
+ private native long[] iterators(final long handle,
+ final long[] columnFamilyHandles, final long readOptHandle)
throws RocksDBException;
- protected native void merge(long handle, byte[] key, int keyOffset,
- int keyLength, byte[] value, int valueOffset, int valueLength,
- long cfHandle) throws RocksDBException;
- protected native void merge(long handle, long writeOptHandle, byte[] key,
- int keyOffset, int keyLength, byte[] value, int valueOffset,
- int valueLength) throws RocksDBException;
- protected native void merge(long handle, long writeOptHandle, byte[] key,
- int keyOffset, int keyLength, byte[] value, int valueOffset,
- int valueLength, long cfHandle) throws RocksDBException;
- protected native int get(long handle, byte[] key, int keyOffset,
- int keyLength, byte[] value, int valueOffset, int valueLength)
+ private native long getSnapshot(final long nativeHandle);
+ private native void releaseSnapshot(
+ final long nativeHandle, final long snapshotHandle);
+ private native String getProperty(final long nativeHandle,
+ final long cfHandle, final String property, final int propertyLength)
throws RocksDBException;
- protected native int get(long handle, byte[] key, int keyOffset,
- int keyLength, byte[] value, int valueOffset, int valueLength,
- long cfHandle) throws RocksDBException;
- protected native int get(long handle, long readOptHandle, byte[] key,
- int keyOffset, int keyLength, byte[] value, int valueOffset,
- int valueLength) throws RocksDBException;
- protected native int get(long handle, long readOptHandle, byte[] key,
- int keyOffset, int keyLength, byte[] value, int valueOffset,
- int valueLength, long cfHandle) throws RocksDBException;
- protected native byte[][] multiGet(final long dbHandle, final byte[][] keys,
- final int[] keyOffsets, final int[] keyLengths);
- protected native byte[][] multiGet(final long dbHandle, final byte[][] keys,
- final int[] keyOffsets, final int[] keyLengths,
- final long[] columnFamilyHandles);
- protected native byte[][] multiGet(final long dbHandle, final long rOptHandle,
- final byte[][] keys, final int[] keyOffsets, final int[] keyLengths);
- protected native byte[][] multiGet(final long dbHandle, final long rOptHandle,
- final byte[][] keys, final int[] keyOffsets, final int[] keyLengths,
- final long[] columnFamilyHandles);
- protected native byte[] get(long handle, byte[] key, int keyOffset,
- int keyLength) throws RocksDBException;
- protected native byte[] get(long handle, byte[] key, int keyOffset,
- int keyLength, long cfHandle) throws RocksDBException;
- protected native byte[] get(long handle, long readOptHandle,
- byte[] key, int keyOffset, int keyLength) throws RocksDBException;
- protected native byte[] get(long handle, long readOptHandle, byte[] key,
- int keyOffset, int keyLength, long cfHandle) throws RocksDBException;
- protected native void delete(long handle, byte[] key, int keyOffset,
- int keyLength) throws RocksDBException;
- protected native void delete(long handle, byte[] key, int keyOffset,
- int keyLength, long cfHandle) throws RocksDBException;
- protected native void delete(long handle, long writeOptHandle, byte[] key,
- int keyOffset, int keyLength) throws RocksDBException;
- protected native void delete(long handle, long writeOptHandle, byte[] key,
- int keyOffset, int keyLength, long cfHandle) throws RocksDBException;
- protected native void singleDelete(
- long handle, byte[] key, int keyLen) throws RocksDBException;
- protected native void singleDelete(
- long handle, byte[] key, int keyLen, long cfHandle)
+ private native Map<String, String> getMapProperty(final long nativeHandle,
+ final long cfHandle, final String property, final int propertyLength)
throws RocksDBException;
- protected native void singleDelete(
- long handle, long writeOptHandle,
- byte[] key, int keyLen) throws RocksDBException;
- protected native void singleDelete(
- long handle, long writeOptHandle,
- byte[] key, int keyLen, long cfHandle) throws RocksDBException;
- protected native void deleteRange(long handle, byte[] beginKey, int beginKeyOffset,
- int beginKeyLength, byte[] endKey, int endKeyOffset, int endKeyLength)
+ private native long getLongProperty(final long nativeHandle,
+ final long cfHandle, final String property, final int propertyLength)
throws RocksDBException;
- protected native void deleteRange(long handle, byte[] beginKey, int beginKeyOffset,
- int beginKeyLength, byte[] endKey, int endKeyOffset, int endKeyLength, long cfHandle)
+ private native void resetStats(final long nativeHandle)
throws RocksDBException;
- protected native void deleteRange(long handle, long writeOptHandle, byte[] beginKey,
- int beginKeyOffset, int beginKeyLength, byte[] endKey, int endKeyOffset, int endKeyLength)
+ private native long getAggregatedLongProperty(final long nativeHandle,
+ final String property, int propertyLength) throws RocksDBException;
+ private native long[] getApproximateSizes(final long nativeHandle,
+ final long columnFamilyHandle, final long[] rangeSliceHandles,
+ final byte includeFlags);
+ private final native long[] getApproximateMemTableStats(
+ final long nativeHandle, final long columnFamilyHandle,
+ final long rangeStartSliceHandle, final long rangeLimitSliceHandle);
+ private native void compactRange(final long handle,
+ /* @Nullable */ final byte[] begin, final int beginLen,
+ /* @Nullable */ final byte[] end, final int endLen,
+ final long compactRangeOptHandle, final long cfHandle)
throws RocksDBException;
- protected native void deleteRange(long handle, long writeOptHandle, byte[] beginKey,
- int beginKeyOffset, int beginKeyLength, byte[] endKey, int endKeyOffset, int endKeyLength,
- long cfHandle) throws RocksDBException;
- protected native String getProperty0(long nativeHandle,
- String property, int propertyLength) throws RocksDBException;
- protected native String getProperty0(long nativeHandle, long cfHandle,
- String property, int propertyLength) throws RocksDBException;
- protected native long getLongProperty(long nativeHandle, String property,
- int propertyLength) throws RocksDBException;
- protected native long getLongProperty(long nativeHandle, long cfHandle,
- String property, int propertyLength) throws RocksDBException;
- protected native long getAggregatedLongProperty(long nativeHandle, String property,
- int propertyLength) throws RocksDBException;
- protected native long iterator(long handle);
- protected native long iterator(long handle, long readOptHandle);
- protected native long iteratorCF(long handle, long cfHandle);
- protected native long iteratorCF(long handle, long cfHandle,
- long readOptHandle);
- protected native long[] iterators(final long handle,
- final long[] columnFamilyHandles, final long readOptHandle)
+ private native void setOptions(final long handle, final long cfHandle,
+ final String[] keys, final String[] values) throws RocksDBException;
+ private native void setDBOptions(final long handle,
+ final String[] keys, final String[] values) throws RocksDBException;
+ private native String[] compactFiles(final long handle,
+ final long compactionOptionsHandle,
+ final long columnFamilyHandle,
+ final String[] inputFileNames,
+ final int outputLevel,
+ final int outputPathId,
+ final long compactionJobInfoHandle) throws RocksDBException;
+ private native void pauseBackgroundWork(final long handle)
throws RocksDBException;
- protected native long getSnapshot(long nativeHandle);
- protected native void releaseSnapshot(
- long nativeHandle, long snapshotHandle);
- @Override protected native void disposeInternal(final long handle);
- private native long getDefaultColumnFamily(long handle);
- private native long createColumnFamily(final long handle,
- final byte[] columnFamilyName, final long columnFamilyOptions)
+ private native void continueBackgroundWork(final long handle)
throws RocksDBException;
- private native void dropColumnFamily(final long handle, final long cfHandle)
+ private native void enableAutoCompaction(final long handle,
+ final long[] columnFamilyHandles) throws RocksDBException;
+ private native int numberLevels(final long handle,
+ final long columnFamilyHandle);
+ private native int maxMemCompactionLevel(final long handle,
+ final long columnFamilyHandle);
+ private native int level0StopWriteTrigger(final long handle,
+ final long columnFamilyHandle);
+ private native String getName(final long handle);
+ private native long getEnv(final long handle);
+ private native void flush(final long handle, final long flushOptHandle,
+ /* @Nullable */ final long[] cfHandles) throws RocksDBException;
+ private native void flushWal(final long handle, final boolean sync)
throws RocksDBException;
- private native void dropColumnFamilies(final long handle,
- final long[] cfHandles) throws RocksDBException;
- private native void flush(long handle, long flushOptHandle)
+ private native void syncWal(final long handle) throws RocksDBException;
+ private native long getLatestSequenceNumber(final long handle);
+ private native boolean setPreserveDeletesSequenceNumber(final long handle,
+ final long sequenceNumber);
+ private native void disableFileDeletions(long handle) throws RocksDBException;
+ private native void enableFileDeletions(long handle, boolean force)
throws RocksDBException;
- private native void flush(long handle, long flushOptHandle, long cfHandle)
+ private native String[] getLiveFiles(final long handle,
+ final boolean flushMemtable) throws RocksDBException;
+ private native LogFile[] getSortedWalFiles(final long handle)
throws RocksDBException;
- private native void compactRange0(long handle, boolean reduce_level,
- int target_level, int target_path_id) throws RocksDBException;
- private native void compactRange0(long handle, byte[] begin, int beginLen,
- byte[] end, int endLen, boolean reduce_level, int target_level,
- int target_path_id) throws RocksDBException;
- private native void compactRange(long handle, byte[] begin, int beginLen,
- byte[] end, int endLen, long compactRangeOptHandle, long cfHandle)
- throws RocksDBException;
- private native void compactRange(long handle, boolean reduce_level,
- int target_level, int target_path_id, long cfHandle)
+ private native long getUpdatesSince(final long handle,
+ final long sequenceNumber) throws RocksDBException;
+ private native void deleteFile(final long handle, final String name)
throws RocksDBException;
- private native void compactRange(long handle, byte[] begin, int beginLen,
- byte[] end, int endLen, boolean reduce_level, int target_level,
- int target_path_id, long cfHandle) throws RocksDBException;
- private native void pauseBackgroundWork(long handle) throws RocksDBException;
- private native void continueBackgroundWork(long handle) throws RocksDBException;
- private native long getLatestSequenceNumber(long handle);
- private native void disableFileDeletions(long handle) throws RocksDBException;
- private native void enableFileDeletions(long handle, boolean force)
+ private native LiveFileMetaData[] getLiveFilesMetaData(final long handle);
+ private native ColumnFamilyMetaData getColumnFamilyMetaData(
+ final long handle, final long columnFamilyHandle);
+ private native void ingestExternalFile(final long handle,
+ final long columnFamilyHandle, final String[] filePathList,
+ final int filePathListLen, final long ingestExternalFileOptionsHandle)
throws RocksDBException;
- private native long getUpdatesSince(long handle, long sequenceNumber)
+ private native void verifyChecksum(final long handle) throws RocksDBException;
+ private native long getDefaultColumnFamily(final long handle);
+ private native Map<String, TableProperties> getPropertiesOfAllTables(
+ final long handle, final long columnFamilyHandle) throws RocksDBException;
+ private native Map<String, TableProperties> getPropertiesOfTablesInRange(
+ final long handle, final long columnFamilyHandle,
+ final long[] rangeSliceHandles);
+ private native long[] suggestCompactRange(final long handle,
+ final long columnFamilyHandle) throws RocksDBException;
+ private native void promoteL0(final long handle,
+ final long columnFamilyHandle, final int targetLevel)
throws RocksDBException;
- private native void setOptions(long handle, long cfHandle, String[] keys,
- String[] values) throws RocksDBException;
- private native void ingestExternalFile(long handle, long cfHandle,
- String[] filePathList, int filePathListLen,
- long ingest_external_file_options_handle) throws RocksDBException;
+ private native void startTrace(final long handle, final long maxTraceFileSize,
+ final long traceWriterHandle) throws RocksDBException;
+ private native void endTrace(final long handle) throws RocksDBException;
+
+
private native static void destroyDB(final String path,
final long optionsHandle) throws RocksDBException;
+
protected DBOptionsInterface options_;
}
*/
RocksEnv(final long handle) {
super(handle);
- disOwnNativeHandle();
}
- /**
- * <p>The helper function of {@link #dispose()} which all subclasses of
- * {@link RocksObject} must implement to release their associated C++
- * resource.</p>
- *
- * <p><strong>Note:</strong> this class is used to use the default
- * RocksEnv with RocksJava. The default env allocation is managed
- * by C++.</p>
- */
@Override
- protected final void disposeInternal(final long handle) {
- }
+ protected native final void disposeInternal(final long handle);
}
package org.rocksdb;
/**
- * RocksDB memory environment.
+ * Memory environment.
*/
+//TODO(AR) rename to MemEnv
public class RocksMemEnv extends Env {
/**
- * <p>Creates a new RocksDB environment that stores its data
+ * <p>Creates a new environment that stores its data
* in memory and delegates all non-file-storage tasks to
- * base_env. The caller must delete the result when it is
+ * {@code baseEnv}.</p>
+ *
+ * <p>The caller must delete the result when it is
* no longer needed.</p>
*
- * <p>{@code *base_env} must remain live while the result is in use.</p>
+ * @param baseEnv the base environment,
+ * must remain live while the result is in use.
+ */
+ public RocksMemEnv(final Env baseEnv) {
+ super(createMemEnv(baseEnv.nativeHandle_));
+ }
+
+ /**
+ * @deprecated Use {@link #RocksMemEnv(Env)}.
*/
+ @Deprecated
public RocksMemEnv() {
- super(createMemEnv());
+ this(Env.getDefault());
}
- private static native long createMemEnv();
+ private static native long createMemEnv(final long baseEnvHandle);
@Override protected final native void disposeInternal(final long handle);
}
--- /dev/null
+package org.rocksdb;
+
+import java.util.List;
+
+/**
+ * Flags for
+ * {@link RocksDB#getApproximateSizes(ColumnFamilyHandle, List, SizeApproximationFlag...)}
+ * that specify whether memtable stats, file stats approximation,
+ * or both should be included.
+ */
+public enum SizeApproximationFlag {
+ NONE((byte)0x0),
+ INCLUDE_MEMTABLES((byte)0x1),
+ INCLUDE_FILES((byte)0x2);
+
+ private final byte value;
+
+ SizeApproximationFlag(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Get the internal byte representation.
+ *
+ * @return the internal representation.
+ */
+ byte getValue() {
+ return value;
+ }
+}
* Slice instances using a handle. </p>
*
* @param nativeHandle address of native instance.
- * @param owningNativeHandle whether to own this reference from the C++ side or not
+ * @param owningNativeHandle true if the Java side owns the memory pointed to
+ * by this reference, false if ownership belongs to the C++ side
*/
Slice(final long nativeHandle, final boolean owningNativeHandle) {
super();
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The metadata that describes a SST file.
+ */
+public class SstFileMetaData {
+ private final String fileName;
+ private final String path;
+ private final long size;
+ private final long smallestSeqno;
+ private final long largestSeqno;
+ private final byte[] smallestKey;
+ private final byte[] largestKey;
+ private final long numReadsSampled;
+ private final boolean beingCompacted;
+ private final long numEntries;
+ private final long numDeletions;
+
+ /**
+ * Called from JNI C++
+ */
+ protected SstFileMetaData(
+ final String fileName,
+ final String path,
+ final long size,
+ final long smallestSeqno,
+ final long largestSeqno,
+ final byte[] smallestKey,
+ final byte[] largestKey,
+ final long numReadsSampled,
+ final boolean beingCompacted,
+ final long numEntries,
+ final long numDeletions) {
+ this.fileName = fileName;
+ this.path = path;
+ this.size = size;
+ this.smallestSeqno = smallestSeqno;
+ this.largestSeqno = largestSeqno;
+ this.smallestKey = smallestKey;
+ this.largestKey = largestKey;
+ this.numReadsSampled = numReadsSampled;
+ this.beingCompacted = beingCompacted;
+ this.numEntries = numEntries;
+ this.numDeletions = numDeletions;
+ }
+
+ /**
+ * Get the name of the file.
+ *
+ * @return the name of the file.
+ */
+ public String fileName() {
+ return fileName;
+ }
+
+ /**
+ * Get the full path where the file is located.
+ *
+ * @return the full path
+ */
+ public String path() {
+ return path;
+ }
+
+ /**
+ * Get the file size in bytes.
+ *
+ * @return file size
+ */
+ public long size() {
+ return size;
+ }
+
+ /**
+ * Get the smallest sequence number in file.
+ *
+ * @return the smallest sequence number
+ */
+ public long smallestSeqno() {
+ return smallestSeqno;
+ }
+
+ /**
+ * Get the largest sequence number in file.
+ *
+ * @return the largest sequence number
+ */
+ public long largestSeqno() {
+ return largestSeqno;
+ }
+
+ /**
+ * Get the smallest user defined key in the file.
+ *
+ * @return the smallest user defined key
+ */
+ public byte[] smallestKey() {
+ return smallestKey;
+ }
+
+ /**
+ * Get the largest user defined key in the file.
+ *
+ * @return the largest user defined key
+ */
+ public byte[] largestKey() {
+ return largestKey;
+ }
+
+ /**
+ * Get the number of times the file has been read.
+ *
+ * @return the number of times the file has been read
+ */
+ public long numReadsSampled() {
+ return numReadsSampled;
+ }
+
+ /**
+ * Returns true if the file is currently being compacted.
+ *
+ * @return true if the file is currently being compacted, false otherwise.
+ */
+ public boolean beingCompacted() {
+ return beingCompacted;
+ }
+
+ /**
+ * Get the number of entries.
+ *
+ * @return the number of entries.
+ */
+ public long numEntries() {
+ return numEntries;
+ }
+
+ /**
+ * Get the number of deletions.
+ *
+ * @return the number of deletions.
+ */
+ public long numDeletions() {
+ return numDeletions;
+ }
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The type used to refer to a thread state.
+ *
+ * A state describes the lower-level action of a thread
+ * such as reading / writing a file or waiting for a mutex.
+ */
+public enum StateType {
+ STATE_UNKNOWN((byte)0x0),
+ STATE_MUTEX_WAIT((byte)0x1);
+
+ private final byte value;
+
+ StateType(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Get the internal representation value.
+ *
+ * @return the internal representation value.
+ */
+ byte getValue() {
+ return value;
+ }
+
+ /**
+ * Get the State type from the internal representation value.
+ *
+ * @param value the internal representation value.
+ *
+ * @return the state type
+ *
+ * @throws IllegalArgumentException if the value does not match
+ * a StateType
+ */
+ static StateType fromValue(final byte value)
+ throws IllegalArgumentException {
+ for (final StateType threadType : StateType.values()) {
+ if (threadType.value == value) {
+ return threadType;
+ }
+ }
+ throw new IllegalArgumentException(
+ "Unknown value for StateType: " + value);
+ }
+}
}
}
throw new IllegalArgumentException(
- "Illegal value provided for InfoLogLevel.");
+ "Illegal value provided for StatsLevel.");
}
}
--- /dev/null
+package org.rocksdb;
+
+/**
+ * Filter for iterating a table.
+ */
+public interface TableFilter {
+
+ /**
+ * A callback to determine whether relevant keys for this scan exist in a
+ * given table based on the table's properties. The callback is passed the
+ * properties of each table during iteration. If the callback returns false,
+ * the table will not be scanned. This option only affects Iterators and has
+ * no impact on point lookups.
+ *
+ * @param tableProperties the table properties.
+ *
+ * @return true if the table should be scanned, false otherwise.
+ */
+ boolean filter(final TableProperties tableProperties);
+}
--- /dev/null
+package org.rocksdb;
+
+import java.util.Map;
+
+/**
+ * TableProperties contains read-only properties of its associated
+ * table.
+ */
+public class TableProperties {
+  private final long dataSize;
+  private final long indexSize;
+  private final long indexPartitions;
+  private final long topLevelIndexSize;
+  private final long indexKeyIsUserKey;
+  private final long indexValueIsDeltaEncoded;
+  private final long filterSize;
+  private final long rawKeySize;
+  private final long rawValueSize;
+  private final long numDataBlocks;
+  private final long numEntries;
+  private final long numDeletions;
+  private final long numMergeOperands;
+  private final long numRangeDeletions;
+  private final long formatVersion;
+  private final long fixedKeyLen;
+  private final long columnFamilyId;
+  private final long creationTime;
+  private final long oldestKeyTime;
+  private final byte[] columnFamilyName;
+  private final String filterPolicyName;
+  private final String comparatorName;
+  private final String mergeOperatorName;
+  private final String prefixExtractorName;
+  private final String propertyCollectorsNames;
+  private final String compressionName;
+  private final Map<String, String> userCollectedProperties;
+  private final Map<String, String> readableProperties;
+  private final Map<String, Long> propertiesOffsets;
+
+  /**
+   * Access is private as this will only be constructed from
+   * C++ via JNI.
+   */
+  private TableProperties(final long dataSize, final long indexSize,
+      final long indexPartitions, final long topLevelIndexSize,
+      final long indexKeyIsUserKey, final long indexValueIsDeltaEncoded,
+      final long filterSize, final long rawKeySize, final long rawValueSize,
+      final long numDataBlocks, final long numEntries, final long numDeletions,
+      final long numMergeOperands, final long numRangeDeletions,
+      final long formatVersion, final long fixedKeyLen,
+      final long columnFamilyId, final long creationTime,
+      final long oldestKeyTime, final byte[] columnFamilyName,
+      final String filterPolicyName, final String comparatorName,
+      final String mergeOperatorName, final String prefixExtractorName,
+      final String propertyCollectorsNames, final String compressionName,
+      final Map<String, String> userCollectedProperties,
+      final Map<String, String> readableProperties,
+      final Map<String, Long> propertiesOffsets) {
+    this.dataSize = dataSize;
+    this.indexSize = indexSize;
+    this.indexPartitions = indexPartitions;
+    this.topLevelIndexSize = topLevelIndexSize;
+    this.indexKeyIsUserKey = indexKeyIsUserKey;
+    this.indexValueIsDeltaEncoded = indexValueIsDeltaEncoded;
+    this.filterSize = filterSize;
+    this.rawKeySize = rawKeySize;
+    this.rawValueSize = rawValueSize;
+    this.numDataBlocks = numDataBlocks;
+    this.numEntries = numEntries;
+    this.numDeletions = numDeletions;
+    this.numMergeOperands = numMergeOperands;
+    this.numRangeDeletions = numRangeDeletions;
+    this.formatVersion = formatVersion;
+    this.fixedKeyLen = fixedKeyLen;
+    this.columnFamilyId = columnFamilyId;
+    this.creationTime = creationTime;
+    this.oldestKeyTime = oldestKeyTime;
+    this.columnFamilyName = columnFamilyName;
+    this.filterPolicyName = filterPolicyName;
+    this.comparatorName = comparatorName;
+    this.mergeOperatorName = mergeOperatorName;
+    this.prefixExtractorName = prefixExtractorName;
+    this.propertyCollectorsNames = propertyCollectorsNames;
+    this.compressionName = compressionName;
+    this.userCollectedProperties = userCollectedProperties;
+    this.readableProperties = readableProperties;
+    this.propertiesOffsets = propertiesOffsets;
+  }
+
+  /**
+   * Get the total size of all data blocks.
+   *
+   * @return the total size of all data blocks.
+   */
+  public long getDataSize() {
+    return dataSize;
+  }
+
+  /**
+   * Get the size of index block.
+   *
+   * @return the size of index block.
+   */
+  public long getIndexSize() {
+    return indexSize;
+  }
+
+  /**
+   * Get the total number of index partitions
+   * if {@link IndexType#kTwoLevelIndexSearch} is used.
+   *
+   * @return the total number of index partitions.
+   */
+  public long getIndexPartitions() {
+    return indexPartitions;
+  }
+
+  /**
+   * Size of the top-level index
+   * if {@link IndexType#kTwoLevelIndexSearch} is used.
+   *
+   * @return the size of the top-level index.
+   */
+  public long getTopLevelIndexSize() {
+    return topLevelIndexSize;
+  }
+
+  /**
+   * Whether the index key is user key.
+   * Otherwise it includes 8 byte of sequence
+   * number added by internal key format.
+   *
+   * @return non-zero if the index key is the user key, zero if it
+   *     includes the sequence number (a boolean stored as a long).
+   */
+  public long getIndexKeyIsUserKey() {
+    return indexKeyIsUserKey;
+  }
+
+  /**
+   * Whether delta encoding is used to encode the index values.
+   *
+   * @return non-zero if delta encoding is used to encode the index
+   *     values, zero otherwise (a boolean stored as a long).
+   */
+  public long getIndexValueIsDeltaEncoded() {
+    return indexValueIsDeltaEncoded;
+  }
+
+  /**
+   * Get the size of filter block.
+   *
+   * @return the size of filter block.
+   */
+  public long getFilterSize() {
+    return filterSize;
+  }
+
+  /**
+   * Get the total raw key size.
+   *
+   * @return the total raw key size.
+   */
+  public long getRawKeySize() {
+    return rawKeySize;
+  }
+
+  /**
+   * Get the total raw value size.
+   *
+   * @return the total raw value size.
+   */
+  public long getRawValueSize() {
+    return rawValueSize;
+  }
+
+  /**
+   * Get the number of blocks in this table.
+   *
+   * @return the number of blocks in this table.
+   */
+  public long getNumDataBlocks() {
+    return numDataBlocks;
+  }
+
+  /**
+   * Get the number of entries in this table.
+   *
+   * @return the number of entries in this table.
+   */
+  public long getNumEntries() {
+    return numEntries;
+  }
+
+  /**
+   * Get the number of deletions in the table.
+   *
+   * @return the number of deletions in the table.
+   */
+  public long getNumDeletions() {
+    return numDeletions;
+  }
+
+  /**
+   * Get the number of merge operands in the table.
+   *
+   * @return the number of merge operands in the table.
+   */
+  public long getNumMergeOperands() {
+    return numMergeOperands;
+  }
+
+  /**
+   * Get the number of range deletions in this table.
+   *
+   * @return the number of range deletions in this table.
+   */
+  public long getNumRangeDeletions() {
+    return numRangeDeletions;
+  }
+
+  /**
+   * Get the format version, reserved for backward compatibility.
+   *
+   * @return the format version.
+   */
+  public long getFormatVersion() {
+    return formatVersion;
+  }
+
+  /**
+   * Get the length of the keys.
+   *
+   * @return 0 when the key is variable length, otherwise number of
+   *     bytes for each key.
+   */
+  public long getFixedKeyLen() {
+    return fixedKeyLen;
+  }
+
+  /**
+   * Get the ID of column family for this SST file,
+   * corresponding to the column family identified by
+   * {@link #getColumnFamilyName()}.
+   *
+   * @return the id of the column family.
+   */
+  public long getColumnFamilyId() {
+    return columnFamilyId;
+  }
+
+  /**
+   * The time when the SST file was created.
+   * Since SST files are immutable, this is equivalent
+   * to last modified time.
+   *
+   * @return the created time.
+   */
+  public long getCreationTime() {
+    return creationTime;
+  }
+
+  /**
+   * Get the timestamp of the earliest key.
+   *
+   * @return 0 means unknown, otherwise the timestamp.
+   */
+  public long getOldestKeyTime() {
+    return oldestKeyTime;
+  }
+
+  /**
+   * Get the name of the column family with which this
+   * SST file is associated.
+   *
+   * @return the name of the column family, or null if the
+   *     column family is unknown.
+   */
+  /*@Nullable*/ public byte[] getColumnFamilyName() {
+    return columnFamilyName;
+  }
+
+  /**
+   * Get the name of the filter policy used in this table.
+   *
+   * @return the name of the filter policy, or null if
+   *     no filter policy is used.
+   */
+  /*@Nullable*/ public String getFilterPolicyName() {
+    return filterPolicyName;
+  }
+
+  /**
+   * Get the name of the comparator used in this table.
+   *
+   * @return the name of the comparator.
+   */
+  public String getComparatorName() {
+    return comparatorName;
+  }
+
+  /**
+   * Get the name of the merge operator used in this table.
+   *
+   * @return the name of the merge operator, or null if no merge operator
+   *     is used.
+   */
+  /*@Nullable*/ public String getMergeOperatorName() {
+    return mergeOperatorName;
+  }
+
+  /**
+   * Get the name of the prefix extractor used in this table.
+   *
+   * @return the name of the prefix extractor, or null if no prefix
+   *     extractor is used.
+   */
+  /*@Nullable*/ public String getPrefixExtractorName() {
+    return prefixExtractorName;
+  }
+
+  /**
+   * Get the names of the property collectors factories used in this table.
+   *
+   * @return the names of the property collector factories separated
+   *     by commas, e.g. {collector_name[1]},{collector_name[2]},...
+   */
+  public String getPropertyCollectorsNames() {
+    return propertyCollectorsNames;
+  }
+
+  /**
+   * Get the name of the compression algorithm used to compress the SST files.
+   *
+   * @return the name of the compression algorithm.
+   */
+  public String getCompressionName() {
+    return compressionName;
+  }
+
+  /**
+   * Get the user collected properties.
+   *
+   * @return the user collected properties.
+   */
+  public Map<String, String> getUserCollectedProperties() {
+    return userCollectedProperties;
+  }
+
+  /**
+   * Get the readable properties.
+   *
+   * @return the readable properties.
+   */
+  public Map<String, String> getReadableProperties() {
+    return readableProperties;
+  }
+
+  /**
+   * The offset of the value of each property in the file.
+   *
+   * @return the offset of each property.
+   */
+  public Map<String, Long> getPropertiesOffsets() {
+    return propertiesOffsets;
+  }
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Map;
+
+public class ThreadStatus {
+ private final long threadId;
+ private final ThreadType threadType;
+ private final String dbName;
+ private final String cfName;
+ private final OperationType operationType;
+ private final long operationElapsedTime; // microseconds
+ private final OperationStage operationStage;
+ private final long operationProperties[];
+ private final StateType stateType;
+
+ /**
+ * Invoked from C++ via JNI
+ */
+ private ThreadStatus(final long threadId,
+ final byte threadTypeValue,
+ final String dbName,
+ final String cfName,
+ final byte operationTypeValue,
+ final long operationElapsedTime,
+ final byte operationStageValue,
+ final long[] operationProperties,
+ final byte stateTypeValue) {
+ this.threadId = threadId;
+ this.threadType = ThreadType.fromValue(threadTypeValue);
+ this.dbName = dbName;
+ this.cfName = cfName;
+ this.operationType = OperationType.fromValue(operationTypeValue);
+ this.operationElapsedTime = operationElapsedTime;
+ this.operationStage = OperationStage.fromValue(operationStageValue);
+ this.operationProperties = operationProperties;
+ this.stateType = StateType.fromValue(stateTypeValue);
+ }
+
+ /**
+ * Get the unique ID of the thread.
+ *
+ * @return the thread id
+ */
+ public long getThreadId() {
+ return threadId;
+ }
+
+ /**
+ * Get the type of the thread.
+ *
+ * @return the type of the thread.
+ */
+ public ThreadType getThreadType() {
+ return threadType;
+ }
+
+ /**
+ * The name of the DB instance that the thread is currently
+ * involved with.
+ *
+ * @return the name of the db, or null if the thread is not involved
+ * in any DB operation.
+ */
+ /* @Nullable */ public String getDbName() {
+ return dbName;
+ }
+
+ /**
+ * The name of the Column Family that the thread is currently
+ * involved with.
+ *
+ * @return the name of the db, or null if the thread is not involved
+ * in any column Family operation.
+ */
+ /* @Nullable */ public String getCfName() {
+ return cfName;
+ }
+
+ /**
+ * Get the operation (high-level action) that the current thread is involved
+ * with.
+ *
+ * @return the operation
+ */
+ public OperationType getOperationType() {
+ return operationType;
+ }
+
+ /**
+ * Get the elapsed time of the current thread operation in microseconds.
+ *
+ * @return the elapsed time
+ */
+ public long getOperationElapsedTime() {
+ return operationElapsedTime;
+ }
+
+ /**
+ * Get the current stage where the thread is involved in the current
+ * operation.
+ *
+ * @return the current stage of the current operation
+ */
+ public OperationStage getOperationStage() {
+ return operationStage;
+ }
+
+ /**
+ * Get the list of properties that describe some details about the current
+ * operation.
+ *
+ * Each field in might have different meanings for different operations.
+ *
+ * @return the properties
+ */
+ public long[] getOperationProperties() {
+ return operationProperties;
+ }
+
+ /**
+ * Get the state (lower-level action) that the current thread is involved
+ * with.
+ *
+ * @return the state
+ */
+ public StateType getStateType() {
+ return stateType;
+ }
+
+ /**
+ * Get the name of the thread type.
+ *
+ * @param threadType the thread type
+ *
+ * @return the name of the thread type.
+ */
+ public static String getThreadTypeName(final ThreadType threadType) {
+ return getThreadTypeName(threadType.getValue());
+ }
+
+ /**
+ * Get the name of an operation given its type.
+ *
+ * @param operationType the type of operation.
+ *
+ * @return the name of the operation.
+ */
+ public static String getOperationName(final OperationType operationType) {
+ return getOperationName(operationType.getValue());
+ }
+
+ public static String microsToString(final long operationElapsedTime) {
+ return microsToStringNative(operationElapsedTime);
+ }
+
+ /**
+ * Obtain a human-readable string describing the specified operation stage.
+ *
+ * @param operationStage the stage of the operation.
+ *
+ * @return the description of the operation stage.
+ */
+ public static String getOperationStageName(
+ final OperationStage operationStage) {
+ return getOperationStageName(operationStage.getValue());
+ }
+
+ /**
+ * Obtain the name of the "i"th operation property of the
+ * specified operation.
+ *
+ * @param operationType the operation type.
+ * @param i the index of the operation property.
+ *
+ * @return the name of the operation property
+ */
+ public static String getOperationPropertyName(
+ final OperationType operationType, final int i) {
+ return getOperationPropertyName(operationType.getValue(), i);
+ }
+
+ /**
+ * Translate the "i"th property of the specified operation given
+ * a property value.
+ *
+ * @param operationType the operation type.
+ * @param operationProperties the operation properties.
+ *
+ * @return the property values.
+ */
+ public static Map<String, Long> interpretOperationProperties(
+ final OperationType operationType, final long[] operationProperties) {
+ return interpretOperationProperties(operationType.getValue(),
+ operationProperties);
+ }
+
+ /**
+ * Obtain the name of a state given its type.
+ *
+ * @param stateType the state type.
+ *
+ * @return the name of the state.
+ */
+ public static String getStateName(final StateType stateType) {
+ return getStateName(stateType.getValue());
+ }
+
+ private static native String getThreadTypeName(final byte threadTypeValue);
+ private static native String getOperationName(final byte operationTypeValue);
+ private static native String microsToStringNative(
+ final long operationElapsedTime);
+ private static native String getOperationStageName(
+ final byte operationStageTypeValue);
+ private static native String getOperationPropertyName(
+ final byte operationTypeValue, final int i);
+ private static native Map<String, Long>interpretOperationProperties(
+ final byte operationTypeValue, final long[] operationProperties);
+ private static native String getStateName(final byte stateTypeValue);
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Identifies which kind of thread a {@link ThreadStatus} describes.
+ */
+public enum ThreadType {
+  /**
+   * RocksDB background thread belonging to the high-priority pool.
+   */
+  HIGH_PRIORITY((byte)0x0),
+
+  /**
+   * RocksDB background thread belonging to the low-priority pool.
+   */
+  LOW_PRIORITY((byte)0x1),
+
+  /**
+   * A user thread, i.e. not a RocksDB background thread.
+   */
+  USER((byte)0x2),
+
+  /**
+   * RocksDB background thread belonging to the bottom-priority pool.
+   */
+  BOTTOM_PRIORITY((byte)0x3);
+
+  private final byte value;
+
+  ThreadType(final byte value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the internal representation value.
+   *
+   * @return the internal representation value.
+   */
+  byte getValue() {
+    return value;
+  }
+
+  /**
+   * Look up the ThreadType matching an internal representation value.
+   *
+   * @param value the internal representation value.
+   *
+   * @return the thread type
+   *
+   * @throws IllegalArgumentException if the value does not match a ThreadType
+   */
+  static ThreadType fromValue(final byte value)
+      throws IllegalArgumentException {
+    for (final ThreadType candidate : values()) {
+      if (candidate.value == value) {
+        return candidate;
+      }
+    }
+    throw new IllegalArgumentException("Unknown value for ThreadType: " + value);
+  }
+}
}
/**
- * @deprecated
- * Exposes internal value of native enum mappings. This method will be marked private in the
- * next major release.
+ * @deprecated Exposes internal value of native enum mappings.
+ * This method will be marked package private in the next major release.
+ *
+ * @return the internal representation
*/
@Deprecated
public byte getValue() {
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Timed environment.
+ */
+public class TimedEnv extends Env {
+
+  /**
+   * <p>Creates a new environment that measures function call times for
+   * filesystem operations, reporting results to variables in PerfContext.</p>
+   *
+   * <p>NOTE(review): the underlying native object is released via
+   * {@code disposeInternal}; close/dispose this TimedEnv when it is
+   * no longer needed — confirm against Env's ownership semantics.</p>
+   *
+   * @param baseEnv the base environment,
+   *     must remain live while the result is in use.
+   */
+  public TimedEnv(final Env baseEnv) {
+    super(createTimedEnv(baseEnv.nativeHandle_));
+  }
+
+  private static native long createTimedEnv(final long baseEnvHandle);
+  @Override protected final native void disposeInternal(final long handle);
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * TraceOptions is used for
+ * {@link RocksDB#startTrace(TraceOptions, AbstractTraceWriter)}.
+ */
+public class TraceOptions {
+  private final long maxTraceFileSize;
+
+  /**
+   * Creates TraceOptions with a default max trace file size of 64 GB.
+   */
+  public TraceOptions() {
+    // Must use a long literal: the int expression 64 * 1024 * 1024 * 1024
+    // overflows 32-bit arithmetic and silently evaluates to 0.
+    this.maxTraceFileSize = 64L * 1024 * 1024 * 1024; // 64 GB
+  }
+
+  /**
+   * Creates TraceOptions with the given max trace file size.
+   *
+   * @param maxTraceFileSize the maximum size of the trace file in bytes.
+   */
+  public TraceOptions(final long maxTraceFileSize) {
+    this.maxTraceFileSize = maxTraceFileSize;
+  }
+
+  /**
+   * To avoid the trace file size growing larger than the storage space,
+   * the user can set the max trace file size in bytes. Default is 64 GB.
+   *
+   * @return the max trace size
+   */
+  public long getMaxTraceFileSize() {
+    return maxTraceFileSize;
+  }
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * TraceWriter allows exporting RocksDB traces to any system,
+ * one operation at a time.
+ */
+public interface TraceWriter {
+
+  /**
+   * Write the data.
+   *
+   * @param data the data
+   *     (NOTE(review): presumably backed by native memory that is only
+   *     valid for the duration of this call — copy it if retained;
+   *     confirm against the JNI callback implementation.)
+   *
+   * @throws RocksDBException if an error occurs whilst writing.
+   */
+  void write(final Slice data) throws RocksDBException;
+
+  /**
+   * Close the writer.
+   *
+   * @throws RocksDBException if an error occurs whilst closing the writer.
+   */
+  void closeWriter() throws RocksDBException;
+
+  /**
+   * Get the size of the file that this writer is writing to.
+   *
+   * @return the file size
+   */
+  long getFileSize();
+}
return tdb;
}
+  /**
+   * This is similar to {@link #close()} except that it
+   * throws an exception if any error occurs.
+   *
+   * This will not fsync the WAL files.
+   * If syncing is required, the caller must first call {@link #syncWal()}
+   * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
+   * with {@link WriteOptions#setSync(boolean)} set to true.
+   *
+   * See also {@link #close()}.
+   *
+   * @throws RocksDBException if an error occurs whilst closing.
+   */
+  public void closeE() throws RocksDBException {
+    // compareAndSet makes closing idempotent: only the first caller
+    // (while the handle is still owned) performs the close.
+    if (owningHandle_.compareAndSet(true, false)) {
+      try {
+        closeDatabase(nativeHandle_);
+      } finally {
+        // Always release the native handle, even if closeDatabase threw.
+        disposeInternal();
+      }
+    }
+  }
+
+  /**
+   * This is similar to {@link #closeE()} except that it
+   * silently ignores any errors.
+   *
+   * This will not fsync the WAL files.
+   * If syncing is required, the caller must first call {@link #syncWal()}
+   * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
+   * with {@link WriteOptions#setSync(boolean)} set to true.
+   *
+   * See also {@link #close()}.
+   */
+  @Override
+  public void close() {
+    // compareAndSet makes closing idempotent: only the first caller
+    // (while the handle is still owned) performs the close.
+    if (owningHandle_.compareAndSet(true, false)) {
+      try {
+        closeDatabase(nativeHandle_);
+      } catch (final RocksDBException e) {
+        // silently ignore the error report
+      } finally {
+        // Always release the native handle, even if closeDatabase threw.
+        disposeInternal();
+      }
+    }
+  }
+
@Override
public Transaction beginTransaction(final WriteOptions writeOptions) {
return new Transaction(this, beginTransaction(nativeHandle_,
this.transactionDbOptions_ = transactionDbOptions;
}
+ @Override protected final native void disposeInternal(final long handle);
+
private static native long open(final long optionsHandle,
final long transactionDbOptionsHandle, final String path)
throws RocksDBException;
private static native long[] open(final long dbOptionsHandle,
final long transactionDbOptionsHandle, final String path,
final byte[][] columnFamilyNames, final long[] columnFamilyOptions);
+ private native static void closeDatabase(final long handle)
+ throws RocksDBException;
private native long beginTransaction(final long handle,
final long writeOptionsHandle);
private native long beginTransaction(final long handle,
private native DeadlockPath[] getDeadlockInfoBuffer(final long handle);
private native void setDeadlockInfoBufferSize(final long handle,
final int targetSize);
- @Override protected final native void disposeInternal(final long handle);
}
return ttlDB;
}
+  /**
+   * <p>Close the TtlDB instance and release resource.</p>
+   *
+   * This is similar to {@link #close()} except that it
+   * throws an exception if any error occurs.
+   *
+   * This will not fsync the WAL files.
+   * If syncing is required, the caller must first call {@link #syncWal()}
+   * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
+   * with {@link WriteOptions#setSync(boolean)} set to true.
+   *
+   * See also {@link #close()}.
+   *
+   * @throws RocksDBException if an error occurs whilst closing.
+   */
+  public void closeE() throws RocksDBException {
+    // compareAndSet makes closing idempotent: only the first caller
+    // (while the handle is still owned) performs the close.
+    if (owningHandle_.compareAndSet(true, false)) {
+      try {
+        closeDatabase(nativeHandle_);
+      } finally {
+        // Always release the native handle, even if closeDatabase threw.
+        disposeInternal();
+      }
+    }
+  }
+
+  /**
+   * <p>Close the TtlDB instance and release resource.</p>
+   *
+   * This is similar to {@link #closeE()} except that it
+   * silently ignores any errors.
+   *
+   * This will not fsync the WAL files.
+   * If syncing is required, the caller must first call {@link #syncWal()}
+   * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
+   * with {@link WriteOptions#setSync(boolean)} set to true.
+   *
+   * See also {@link #close()}.
+   */
+  @Override
+  public void close() {
+    // compareAndSet makes closing idempotent: only the first caller
+    // (while the handle is still owned) performs the close.
+    if (owningHandle_.compareAndSet(true, false)) {
+      try {
+        closeDatabase(nativeHandle_);
+      } catch (final RocksDBException e) {
+        // silently ignore the error report
+      } finally {
+        // Always release the native handle, even if closeDatabase threw.
+        disposeInternal();
+      }
+    }
+  }
+
/**
* <p>Creates a new ttl based column family with a name defined
* in given ColumnFamilyDescriptor and allocates a
final int ttl) throws RocksDBException {
return new ColumnFamilyHandle(this,
createColumnFamilyWithTtl(nativeHandle_,
- columnFamilyDescriptor.columnFamilyName(),
- columnFamilyDescriptor.columnFamilyOptions().nativeHandle_, ttl));
- }
-
- /**
- * <p>Close the TtlDB instance and release resource.</p>
- *
- * <p>Internally, TtlDB owns the {@code rocksdb::DB} pointer
- * to its associated {@link org.rocksdb.RocksDB}. The release
- * of that RocksDB pointer is handled in the destructor of the
- * c++ {@code rocksdb::TtlDB} and should be transparent to
- * Java developers.</p>
- */
- @Override
- public void close() {
- super.close();
+ columnFamilyDescriptor.getName(),
+ columnFamilyDescriptor.getOptions().nativeHandle_, ttl));
}
/**
super(nativeHandle);
}
- @Override protected void finalize() throws Throwable {
- close(); //TODO(AR) revisit here when implementing AutoCloseable
- super.finalize();
- }
+ @Override protected native void disposeInternal(final long handle);
private native static long open(final long optionsHandle,
final String db_path, final int ttl, final boolean readOnly)
private native long createColumnFamilyWithTtl(final long handle,
final byte[] columnFamilyName, final long columnFamilyOptions, int ttl)
throws RocksDBException;
+ private native static void closeDatabase(final long handle)
+ throws RocksDBException;
}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public enum WalFileType {
+  /**
+   * Indicates that WAL file is in archive directory. WAL files are moved from
+   * the main db directory to archive directory once they are not live and stay
+   * there until cleaned up. Files are cleaned depending on archive size
+   * (Options::WAL_size_limit_MB) and time since last cleaning
+   * (Options::WAL_ttl_seconds).
+   */
+  kArchivedLogFile((byte)0x0),
+
+  /**
+   * Indicates that WAL file is live and resides in the main db directory
+   */
+  kAliveLogFile((byte)0x1);
+
+  private final byte value;
+
+  WalFileType(final byte value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the internal representation value.
+   *
+   * @return the internal representation value
+   */
+  byte getValue() {
+    return value;
+  }
+
+  /**
+   * Get the WalFileType from the internal representation value.
+   *
+   * @param value the internal representation value.
+   *
+   * @return the wal file type.
+   *
+   * @throws IllegalArgumentException if the value is unknown.
+   */
+  static WalFileType fromValue(final byte value) {
+    for (final WalFileType walFileType : WalFileType.values()) {
+      if(walFileType.value == value) {
+        return walFileType;
+      }
+    }
+
+    throw new IllegalArgumentException(
+        "Illegal value provided for WalFileType: " + value);
+  }
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Map;
+
+/**
+ * WALFilter allows an application to inspect write-ahead-log (WAL)
+ * records or modify their processing on recovery.
+ */
+public interface WalFilter {
+
+  /**
+   * Provide ColumnFamily->LogNumber map to filter
+   * so that filter can determine whether a log number applies to a given
+   * column family (i.e. that log hasn't been flushed to SST already for the
+   * column family).
+   *
+   * We also pass in a name->id map, as only the name is known during
+   * recovery (as handles are opened post-recovery),
+   * while write batch callbacks happen in terms of column family id.
+   *
+   * @param cfLognumber column_family_id to lognumber map
+   * @param cfNameId column_family_name to column_family_id map
+   */
+  void columnFamilyLogNumberMap(final Map<Integer, Long> cfLognumber,
+      final Map<String, Integer> cfNameId);
+
+  /**
+   * LogRecord is invoked for each log record encountered for all the logs
+   * during replay on logs on recovery. This method can be used to:
+   *     * inspect the record (using the batch parameter)
+   *     * ignoring current record
+   *         (by returning WalProcessingOption::kIgnoreCurrentRecord)
+   *     * reporting corrupted record
+   *         (by returning WalProcessingOption::kCorruptedRecord)
+   *     * stop log replay
+   *         (by returning kStop replay) - please note that this implies
+   *         discarding the logs from current record onwards.
+   *
+   * @param logNumber log number of the current log.
+   *     Filter might use this to determine if the log
+   *     record is applicable to a certain column family.
+   * @param logFileName log file name - only for informational purposes
+   * @param batch batch encountered in the log during recovery
+   * @param newBatch new batch to populate if filter wants to change
+   *     the batch (for example to filter some records out, or alter some
+   *     records). Please note that the new batch MUST NOT contain
+   *     more records than original, else recovery would be failed.
+   *
+   * @return Processing option for the current record.
+   */
+  LogRecordFoundResult logRecordFound(final long logNumber,
+      final String logFileName, final WriteBatch batch,
+      final WriteBatch newBatch);
+
+  class LogRecordFoundResult {
+    /**
+     * Shared immutable result meaning: continue processing as usual,
+     * batch unchanged. Declared final so the shared constant cannot be
+     * reassigned.
+     */
+    public static final LogRecordFoundResult CONTINUE_UNCHANGED =
+        new LogRecordFoundResult(WalProcessingOption.CONTINUE_PROCESSING, false);
+
+    final WalProcessingOption walProcessingOption;
+    final boolean batchChanged;
+
+    /**
+     * @param walProcessingOption the processing option
+     * @param batchChanged Whether batch was changed by the filter.
+     *     It must be set to true if newBatch was populated,
+     *     else newBatch has no effect.
+     */
+    public LogRecordFoundResult(final WalProcessingOption walProcessingOption,
+        final boolean batchChanged) {
+      this.walProcessingOption = walProcessingOption;
+      this.batchChanged = batchChanged;
+    }
+  }
+
+  /**
+   * Returns a name that identifies this WAL filter.
+   * The name will be printed to LOG file on start up for diagnosis.
+   *
+   * @return the name
+   */
+  String name();
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public enum WalProcessingOption {
+  /**
+   * Continue processing as usual.
+   */
+  CONTINUE_PROCESSING((byte)0x0),
+
+  /**
+   * Ignore the current record but continue processing of log(s).
+   */
+  IGNORE_CURRENT_RECORD((byte)0x1),
+
+  /**
+   * Stop replay of logs and discard logs.
+   * Logs won't be replayed on subsequent recovery.
+   */
+  STOP_REPLAY((byte)0x2),
+
+  /**
+   * Corrupted record detected by filter.
+   */
+  CORRUPTED_RECORD((byte)0x3);
+
+  private final byte value;
+
+  WalProcessingOption(final byte value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the internal representation.
+   *
+   * @return the internal representation.
+   */
+  byte getValue() {
+    return value;
+  }
+
+  /**
+   * Get the WalProcessingOption from the internal representation value.
+   *
+   * @param value the internal representation value.
+   *
+   * @return the wal processing option
+   *
+   * @throws IllegalArgumentException if the value does not match a
+   *     WalProcessingOption
+   */
+  public static WalProcessingOption fromValue(final byte value) {
+    for (final WalProcessingOption walProcessingOption : WalProcessingOption.values()) {
+      if (walProcessingOption.value == value) {
+        return walProcessingOption;
+      }
+    }
+    throw new IllegalArgumentException(
+        "Illegal value provided for WalProcessingOption: " + value);
+  }
+}
-package org.rocksdb;
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
-import org.rocksdb.Cache;
+package org.rocksdb;
/**
* Java wrapper over native write_buffer_manager class
return noSlowdown(nativeHandle_);
}
+  /**
+   * If true, this write request is of lower priority if compaction is
+   * behind. In that case, if {@link #noSlowdown()} == true, the request
+   * will be cancelled immediately with {@link Status.Code#Incomplete} returned.
+   * Otherwise, it will be slowed down. The slowdown value is determined by
+   * RocksDB to guarantee it introduces minimum impacts to high priority writes.
+   *
+   * Default: false
+   *
+   * @param lowPri true if the write request should be of lower priority than
+   *     compactions which are behind.
+   *
+   * @return the instance of the current WriteOptions.
+   */
+  public WriteOptions setLowPri(final boolean lowPri) {
+    setLowPri(nativeHandle_, lowPri);
+    return this;
+  }
+
+  /**
+   * Returns true if this write request is of lower priority if compaction is
+   * behind.
+   *
+   * Default: false
+   *
+   * See {@link #setLowPri(boolean)}.
+   *
+   * @return true if this write request is of lower priority, false otherwise.
+   */
+  public boolean lowPri() {
+    return lowPri(nativeHandle_);
+  }
+
private native static long newWriteOptions();
private native static long copyWriteOptions(long handle);
+ @Override protected final native void disposeInternal(final long handle);
+
private native void setSync(long handle, boolean flag);
private native boolean sync(long handle);
private native void setDisableWAL(long handle, boolean flag);
private native void setNoSlowdown(final long handle,
final boolean noSlowdown);
private native boolean noSlowdown(final long handle);
- @Override protected final native void disposeInternal(final long handle);
+ private native void setLowPri(final long handle, final boolean lowPri);
+ private native boolean lowPri(final long handle);
}
assertThat(backupableDBOptions.backupEnv()).
isNull();
- try(final Env env = new RocksMemEnv()) {
+ try(final Env env = new RocksMemEnv(Env.getDefault())) {
backupableDBOptions.setBackupEnv(env);
assertThat(backupableDBOptions.backupEnv())
.isEqualTo(env);
package org.rocksdb;
import org.junit.ClassRule;
+import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
@Rule public TemporaryFolder dbFolder = new TemporaryFolder();
+ @Test
+ public void cacheIndexAndFilterBlocks() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setCacheIndexAndFilterBlocks(true);
+ assertThat(blockBasedTableConfig.cacheIndexAndFilterBlocks()).
+ isTrue();
+
+ }
+
+ @Test
+ public void cacheIndexAndFilterBlocksWithHighPriority() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setCacheIndexAndFilterBlocksWithHighPriority(true);
+ assertThat(blockBasedTableConfig.cacheIndexAndFilterBlocksWithHighPriority()).
+ isTrue();
+ }
+
+ @Test
+ public void pinL0FilterAndIndexBlocksInCache() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setPinL0FilterAndIndexBlocksInCache(true);
+ assertThat(blockBasedTableConfig.pinL0FilterAndIndexBlocksInCache()).
+ isTrue();
+ }
+
+ @Test
+ public void pinTopLevelIndexAndFilter() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setPinTopLevelIndexAndFilter(false);
+ assertThat(blockBasedTableConfig.pinTopLevelIndexAndFilter()).
+ isFalse();
+ }
+
+ @Test
+ public void indexType() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ assertThat(IndexType.values().length).isEqualTo(3);
+ blockBasedTableConfig.setIndexType(IndexType.kHashSearch);
+ // Use isEqualTo: a bare assertThat(a.equals(b)) chains no assertion and can never fail.
+ assertThat(blockBasedTableConfig.indexType()).isEqualTo(IndexType.kHashSearch);
+ assertThat(IndexType.valueOf("kBinarySearch")).isNotNull();
+ blockBasedTableConfig.setIndexType(IndexType.valueOf("kBinarySearch"));
+ assertThat(blockBasedTableConfig.indexType()).isEqualTo(IndexType.kBinarySearch);
+ }
+
+ @Test
+ public void dataBlockIndexType() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setDataBlockIndexType(DataBlockIndexType.kDataBlockBinaryAndHash);
+ // Use isEqualTo: a bare assertThat(a.equals(b)) chains no assertion and can never fail.
+ assertThat(blockBasedTableConfig.dataBlockIndexType()).isEqualTo(DataBlockIndexType.kDataBlockBinaryAndHash);
+ blockBasedTableConfig.setDataBlockIndexType(DataBlockIndexType.kDataBlockBinarySearch);
+ assertThat(blockBasedTableConfig.dataBlockIndexType()).isEqualTo(DataBlockIndexType.kDataBlockBinarySearch);
+ }
+
+ @Test
+ public void checksumType() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ assertThat(ChecksumType.values().length).isEqualTo(3);
+ assertThat(ChecksumType.valueOf("kxxHash")).
+ isEqualTo(ChecksumType.kxxHash);
+ blockBasedTableConfig.setChecksumType(ChecksumType.kNoChecksum);
+ blockBasedTableConfig.setChecksumType(ChecksumType.kxxHash);
+ // Use isEqualTo: a bare assertThat(a.equals(b)) chains no assertion and can never fail.
+ assertThat(blockBasedTableConfig.checksumType()).isEqualTo(ChecksumType.kxxHash);
+ }
+
@Test
public void noBlockCache() {
- BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
blockBasedTableConfig.setNoBlockCache(true);
assertThat(blockBasedTableConfig.noBlockCache()).isTrue();
}
@Test
- public void blockCacheSize() {
- BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
- blockBasedTableConfig.setBlockCacheSize(8 * 1024);
- assertThat(blockBasedTableConfig.blockCacheSize()).
- isEqualTo(8 * 1024);
+ public void blockCache() {
+ try (
+ final Cache cache = new LRUCache(17 * 1024 * 1024);
+ final Options options = new Options().setTableFormatConfig(
+ new BlockBasedTableConfig().setBlockCache(cache))) {
+ assertThat(options.tableFactoryName()).isEqualTo("BlockBasedTable");
+ }
}
@Test
- public void sharedBlockCache() throws RocksDBException {
+ public void blockCacheIntegration() throws RocksDBException {
try (final Cache cache = new LRUCache(8 * 1024 * 1024);
final Statistics statistics = new Statistics()) {
for (int shard = 0; shard < 8; shard++) {
}
@Test
- public void blockSizeDeviation() {
- BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
- blockBasedTableConfig.setBlockSizeDeviation(12);
- assertThat(blockBasedTableConfig.blockSizeDeviation()).
- isEqualTo(12);
+ public void persistentCache() throws RocksDBException {
+ try (final DBOptions dbOptions = new DBOptions().
+ setInfoLogLevel(InfoLogLevel.INFO_LEVEL).
+ setCreateIfMissing(true);
+ final Logger logger = new Logger(dbOptions) {
+ @Override
+ protected void log(final InfoLogLevel infoLogLevel, final String logMsg) {
+ System.out.println(infoLogLevel.name() + ": " + logMsg);
+ }
+ }) {
+ try (final PersistentCache persistentCache =
+ new PersistentCache(Env.getDefault(), dbFolder.getRoot().getPath(), 1024 * 1024 * 100, logger, false);
+ final Options options = new Options().setTableFormatConfig(
+ new BlockBasedTableConfig().setPersistentCache(persistentCache))) {
+ assertThat(options.tableFactoryName()).isEqualTo("BlockBasedTable");
+ }
+ }
}
@Test
- public void blockRestartInterval() {
- BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
- blockBasedTableConfig.setBlockRestartInterval(15);
- assertThat(blockBasedTableConfig.blockRestartInterval()).
- isEqualTo(15);
+ public void blockCacheCompressed() {
+ try (final Cache cache = new LRUCache(17 * 1024 * 1024);
+ final Options options = new Options().setTableFormatConfig(
+ new BlockBasedTableConfig().setBlockCacheCompressed(cache))) {
+ assertThat(options.tableFactoryName()).isEqualTo("BlockBasedTable");
+ }
}
+ @Ignore("See issue: https://github.com/facebook/rocksdb/issues/4822")
@Test
- public void wholeKeyFiltering() {
- BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
- blockBasedTableConfig.setWholeKeyFiltering(false);
- assertThat(blockBasedTableConfig.wholeKeyFiltering()).
- isFalse();
+ public void blockCacheCompressedIntegration() throws RocksDBException {
+ // Four distinct keys. Previously key2-key4 all duplicated the literal
+ // "some-key1" (copy-paste bug), so the four puts collapsed into one record.
+ final byte[] key1 = "some-key1".getBytes(StandardCharsets.UTF_8);
+ final byte[] key2 = "some-key2".getBytes(StandardCharsets.UTF_8);
+ final byte[] key3 = "some-key3".getBytes(StandardCharsets.UTF_8);
+ final byte[] key4 = "some-key4".getBytes(StandardCharsets.UTF_8);
+ final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8);
+
+ try (final Cache compressedCache = new LRUCache(8 * 1024 * 1024);
+ final Statistics statistics = new Statistics()) {
+
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig()
+ .setNoBlockCache(true)
+ .setBlockCache(null)
+ .setBlockCacheCompressed(compressedCache)
+ .setFormatVersion(4);
+
+ try (final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setStatistics(statistics)
+ .setTableFormatConfig(blockBasedTableConfig)) {
+
+ for (int shard = 0; shard < 8; shard++) {
+ try (final FlushOptions flushOptions = new FlushOptions();
+ final WriteOptions writeOptions = new WriteOptions();
+ final ReadOptions readOptions = new ReadOptions();
+ final RocksDB db =
+ RocksDB.open(options, dbFolder.getRoot().getAbsolutePath() + "/" + shard)) {
+
+ db.put(writeOptions, key1, value);
+ db.put(writeOptions, key2, value);
+ db.put(writeOptions, key3, value);
+ db.put(writeOptions, key4, value);
+ db.flush(flushOptions);
+
+ db.get(readOptions, key1);
+ db.get(readOptions, key2);
+ db.get(readOptions, key3);
+ db.get(readOptions, key4);
+
+ assertThat(statistics.getTickerCount(TickerType.BLOCK_CACHE_COMPRESSED_ADD)).isEqualTo(shard + 1);
+ }
+ }
+ }
+ }
+ }
@Test
- public void cacheIndexAndFilterBlocks() {
- BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
- blockBasedTableConfig.setCacheIndexAndFilterBlocks(true);
- assertThat(blockBasedTableConfig.cacheIndexAndFilterBlocks()).
- isTrue();
-
+ public void blockSize() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setBlockSize(10);
+ assertThat(blockBasedTableConfig.blockSize()).isEqualTo(10);
}
@Test
- public void cacheIndexAndFilterBlocksWithHighPriority() {
- BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
- blockBasedTableConfig.setCacheIndexAndFilterBlocksWithHighPriority(true);
- assertThat(blockBasedTableConfig.cacheIndexAndFilterBlocksWithHighPriority()).
- isTrue();
+ public void blockSizeDeviation() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setBlockSizeDeviation(12);
+ assertThat(blockBasedTableConfig.blockSizeDeviation()).
+ isEqualTo(12);
}
@Test
- public void pinL0FilterAndIndexBlocksInCache() {
- BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
- blockBasedTableConfig.setPinL0FilterAndIndexBlocksInCache(true);
- assertThat(blockBasedTableConfig.pinL0FilterAndIndexBlocksInCache()).
- isTrue();
+ public void blockRestartInterval() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setBlockRestartInterval(15);
+ assertThat(blockBasedTableConfig.blockRestartInterval()).
+ isEqualTo(15);
}
@Test
- public void partitionFilters() {
- BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
- blockBasedTableConfig.setPartitionFilters(true);
- assertThat(blockBasedTableConfig.partitionFilters()).
- isTrue();
+ public void indexBlockRestartInterval() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setIndexBlockRestartInterval(15);
+ assertThat(blockBasedTableConfig.indexBlockRestartInterval()).
+ isEqualTo(15);
}
@Test
public void metadataBlockSize() {
- BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
blockBasedTableConfig.setMetadataBlockSize(1024);
assertThat(blockBasedTableConfig.metadataBlockSize()).
- isEqualTo(1024);
+ isEqualTo(1024);
}
@Test
- public void pinTopLevelIndexAndFilter() {
- BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
- blockBasedTableConfig.setPinTopLevelIndexAndFilter(false);
- assertThat(blockBasedTableConfig.pinTopLevelIndexAndFilter()).
- isFalse();
+ public void partitionFilters() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setPartitionFilters(true);
+ assertThat(blockBasedTableConfig.partitionFilters()).
+ isTrue();
}
@Test
- public void hashIndexAllowCollision() {
- BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
- blockBasedTableConfig.setHashIndexAllowCollision(false);
- assertThat(blockBasedTableConfig.hashIndexAllowCollision()).
+ public void useDeltaEncoding() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setUseDeltaEncoding(false);
+ assertThat(blockBasedTableConfig.useDeltaEncoding()).
isFalse();
}
@Test
- public void blockCacheCompressedSize() {
- BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
- blockBasedTableConfig.setBlockCacheCompressedSize(40);
- assertThat(blockBasedTableConfig.blockCacheCompressedSize()).
- isEqualTo(40);
+ public void blockBasedTableWithFilterPolicy() {
+ try(final Options options = new Options()
+ .setTableFormatConfig(new BlockBasedTableConfig()
+ .setFilterPolicy(new BloomFilter(10)))) {
+ assertThat(options.tableFactoryName()).
+ isEqualTo("BlockBasedTable");
+ }
}
@Test
- public void checksumType() {
- BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
- assertThat(ChecksumType.values().length).isEqualTo(3);
- assertThat(ChecksumType.valueOf("kxxHash")).
- isEqualTo(ChecksumType.kxxHash);
- blockBasedTableConfig.setChecksumType(ChecksumType.kNoChecksum);
- blockBasedTableConfig.setChecksumType(ChecksumType.kxxHash);
- assertThat(blockBasedTableConfig.checksumType().equals(
- ChecksumType.kxxHash));
+ public void blockBasedTableWithoutFilterPolicy() {
+ try(final Options options = new Options().setTableFormatConfig(
+ new BlockBasedTableConfig().setFilterPolicy(null))) {
+ assertThat(options.tableFactoryName()).
+ isEqualTo("BlockBasedTable");
+ }
}
@Test
- public void indexType() {
- BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
- assertThat(IndexType.values().length).isEqualTo(3);
- blockBasedTableConfig.setIndexType(IndexType.kHashSearch);
- assertThat(blockBasedTableConfig.indexType().equals(
- IndexType.kHashSearch));
- assertThat(IndexType.valueOf("kBinarySearch")).isNotNull();
- blockBasedTableConfig.setIndexType(IndexType.valueOf("kBinarySearch"));
- assertThat(blockBasedTableConfig.indexType().equals(
- IndexType.kBinarySearch));
+ public void wholeKeyFiltering() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setWholeKeyFiltering(false);
+ assertThat(blockBasedTableConfig.wholeKeyFiltering()).
+ isFalse();
}
@Test
- public void blockCacheCompressedNumShardBits() {
- BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
- blockBasedTableConfig.setBlockCacheCompressedNumShardBits(4);
- assertThat(blockBasedTableConfig.blockCacheCompressedNumShardBits()).
- isEqualTo(4);
+ public void verifyCompression() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setVerifyCompression(true);
+ assertThat(blockBasedTableConfig.verifyCompression()).
+ isTrue();
}
@Test
- public void cacheNumShardBits() {
- BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
- blockBasedTableConfig.setCacheNumShardBits(5);
- assertThat(blockBasedTableConfig.cacheNumShardBits()).
- isEqualTo(5);
+ public void readAmpBytesPerBit() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setReadAmpBytesPerBit(2);
+ assertThat(blockBasedTableConfig.readAmpBytesPerBit()).
+ isEqualTo(2);
}
@Test
- public void blockSize() {
- BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
- blockBasedTableConfig.setBlockSize(10);
- assertThat(blockBasedTableConfig.blockSize()).isEqualTo(10);
+ public void formatVersion() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ for (int version = 0; version < 5; version++) {
+ blockBasedTableConfig.setFormatVersion(version);
+ assertThat(blockBasedTableConfig.formatVersion()).isEqualTo(version);
+ }
+ }
+
+ @Test(expected = AssertionError.class)
+ public void formatVersionFailNegative() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setFormatVersion(-1);
}
+ @Test(expected = AssertionError.class)
+ public void formatVersionFailIllegalVersion() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setFormatVersion(99);
+ }
@Test
- public void blockBasedTableWithFilter() {
- try(final Options options = new Options()
- .setTableFormatConfig(new BlockBasedTableConfig()
- .setFilter(new BloomFilter(10)))) {
- assertThat(options.tableFactoryName()).
- isEqualTo("BlockBasedTable");
- }
+ public void enableIndexCompression() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setEnableIndexCompression(false);
+ assertThat(blockBasedTableConfig.enableIndexCompression()).
+ isFalse();
}
@Test
- public void blockBasedTableWithoutFilter() {
- try(final Options options = new Options().setTableFormatConfig(
- new BlockBasedTableConfig().setFilter(null))) {
- assertThat(options.tableFactoryName()).
- isEqualTo("BlockBasedTable");
- }
+ public void blockAlign() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setBlockAlign(true);
+ assertThat(blockBasedTableConfig.blockAlign()).
+ isTrue();
}
+ @Deprecated
@Test
- public void blockBasedTableWithBlockCache() {
- try (final Options options = new Options().setTableFormatConfig(
- new BlockBasedTableConfig().setBlockCache(new LRUCache(17 * 1024 * 1024)))) {
- assertThat(options.tableFactoryName()).isEqualTo("BlockBasedTable");
- }
+ public void hashIndexAllowCollision() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setHashIndexAllowCollision(false);
+ assertThat(blockBasedTableConfig.hashIndexAllowCollision()).
+ isTrue(); // NOTE: setHashIndexAllowCollision should do nothing!
}
+ @Deprecated
@Test
- public void blockBasedTableFormatVersion() {
- BlockBasedTableConfig config = new BlockBasedTableConfig();
- for (int version=0; version<=2; version++) {
- config.setFormatVersion(version);
- assertThat(config.formatVersion()).isEqualTo(version);
- }
+ public void blockCacheSize() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setBlockCacheSize(8 * 1024);
+ assertThat(blockBasedTableConfig.blockCacheSize()).
+ isEqualTo(8 * 1024);
}
- @Test(expected = AssertionError.class)
- public void blockBasedTableFormatVersionFailNegative() {
- BlockBasedTableConfig config = new BlockBasedTableConfig();
- config.setFormatVersion(-1);
+ @Deprecated
+ @Test
+ public void blockCacheNumShardBits() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setCacheNumShardBits(5);
+ assertThat(blockBasedTableConfig.cacheNumShardBits()).
+ isEqualTo(5);
}
- @Test(expected = AssertionError.class)
- public void blockBasedTableFormatVersionFailIllegalVersion() {
- BlockBasedTableConfig config = new BlockBasedTableConfig();
- config.setFormatVersion(3);
+ @Deprecated
+ @Test
+ public void blockCacheCompressedSize() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setBlockCacheCompressedSize(40);
+ assertThat(blockBasedTableConfig.blockCacheCompressedSize()).
+ isEqualTo(40);
+ }
+
+ @Deprecated
+ @Test
+ public void blockCacheCompressedNumShardBits() {
+ final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+ blockBasedTableConfig.setBlockCacheCompressedNumShardBits(4);
+ assertThat(blockBasedTableConfig.blockCacheCompressedNumShardBits()).
+ isEqualTo(4);
}
}
}
}
+ @Test
+ public void bottommostCompressionOptions() {
+ try (final ColumnFamilyOptions columnFamilyOptions =
+ new ColumnFamilyOptions();
+ final CompressionOptions bottommostCompressionOptions =
+ new CompressionOptions()
+ .setMaxDictBytes(123)) {
+
+ columnFamilyOptions.setBottommostCompressionOptions(
+ bottommostCompressionOptions);
+ assertThat(columnFamilyOptions.bottommostCompressionOptions())
+ .isEqualTo(bottommostCompressionOptions);
+ assertThat(columnFamilyOptions.bottommostCompressionOptions()
+ .maxDictBytes()).isEqualTo(123);
+ }
+ }
+
@Test
public void compressionOptions() {
try (final ColumnFamilyOptions columnFamilyOptions
}
}
+ @Test
+ public void ttl() {
+ try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) {
+ options.setTtl(1000 * 60);
+ assertThat(options.ttl()).
+ isEqualTo(1000 * 60);
+ }
+ }
+
@Test
public void compactionOptionsUniversal() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions();
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CompactionJobInfoTest {
+
+ @ClassRule
+ public static final RocksMemoryResource rocksMemoryResource =
+ new RocksMemoryResource();
+
+ @Test
+ public void columnFamilyName() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.columnFamilyName())
+ .isEmpty();
+ }
+ }
+
+ @Test
+ public void status() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.status().getCode())
+ .isEqualTo(Status.Code.Ok);
+ }
+ }
+
+ @Test
+ public void threadId() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.threadId())
+ .isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void jobId() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.jobId())
+ .isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void baseInputLevel() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.baseInputLevel())
+ .isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void outputLevel() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.outputLevel())
+ .isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void inputFiles() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.inputFiles())
+ .isEmpty();
+ }
+ }
+
+ @Test
+ public void outputFiles() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.outputFiles())
+ .isEmpty();
+ }
+ }
+
+ @Test
+ public void tableProperties() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.tableProperties())
+ .isEmpty();
+ }
+ }
+
+ @Test
+ public void compactionReason() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.compactionReason())
+ .isEqualTo(CompactionReason.kUnknown);
+ }
+ }
+
+ @Test
+ public void compression() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.compression())
+ .isEqualTo(CompressionType.NO_COMPRESSION);
+ }
+ }
+
+ @Test
+ public void stats() {
+ try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) {
+ assertThat(compactionJobInfo.stats())
+ .isNotNull();
+ }
+ }
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CompactionJobStatsTest {
+
+ @ClassRule
+ public static final RocksMemoryResource rocksMemoryResource =
+ new RocksMemoryResource();
+
+ @Test
+ public void reset() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ compactionJobStats.reset();
+ assertThat(compactionJobStats.elapsedMicros()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void add() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats();
+ final CompactionJobStats otherCompactionJobStats = new CompactionJobStats()) {
+ compactionJobStats.add(otherCompactionJobStats);
+ }
+ }
+
+ @Test
+ public void elapsedMicros() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.elapsedMicros()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numInputRecords() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numInputRecords()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numInputFiles() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numInputFiles()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numInputFilesAtOutputLevel() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numInputFilesAtOutputLevel()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numOutputRecords() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numOutputRecords()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numOutputFiles() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numOutputFiles()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void isManualCompaction() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.isManualCompaction()).isFalse();
+ }
+ }
+
+ @Test
+ public void totalInputBytes() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.totalInputBytes()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void totalOutputBytes() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.totalOutputBytes()).isEqualTo(0);
+ }
+ }
+
+
+ @Test
+ public void numRecordsReplaced() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numRecordsReplaced()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void totalInputRawKeyBytes() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.totalInputRawKeyBytes()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void totalInputRawValueBytes() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.totalInputRawValueBytes()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numInputDeletionRecords() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numInputDeletionRecords()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numExpiredDeletionRecords() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numExpiredDeletionRecords()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numCorruptKeys() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numCorruptKeys()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void fileWriteNanos() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.fileWriteNanos()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void fileRangeSyncNanos() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.fileRangeSyncNanos()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void fileFsyncNanos() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.fileFsyncNanos()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void filePrepareWriteNanos() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.filePrepareWriteNanos()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void smallestOutputKeyPrefix() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.smallestOutputKeyPrefix()).isEmpty();
+ }
+ }
+
+ @Test
+ public void largestOutputKeyPrefix() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.largestOutputKeyPrefix()).isEmpty();
+ }
+ }
+
+ @Test
+ public void numSingleDelFallthru() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numSingleDelFallthru()).isEqualTo(0);
+ }
+ }
+
+ @Test
+ public void numSingleDelMismatch() {
+ try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) {
+ assertThat(compactionJobStats.numSingleDelMismatch()).isEqualTo(0);
+ }
+ }
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+// Unit tests for CompactionOptions: verifies each option's documented default
+// value and that its setter round-trips through the native handle.
+public class CompactionOptionsTest {
+
+ @ClassRule
+ public static final RocksMemoryResource rocksMemoryResource =
+ new RocksMemoryResource();
+
+ // Default compression is Snappy; setter round-trips to NO_COMPRESSION.
+ @Test
+ public void compression() {
+ try (final CompactionOptions compactionOptions = new CompactionOptions()) {
+ assertThat(compactionOptions.compression())
+ .isEqualTo(CompressionType.SNAPPY_COMPRESSION);
+ compactionOptions.setCompression(CompressionType.NO_COMPRESSION);
+ assertThat(compactionOptions.compression())
+ .isEqualTo(CompressionType.NO_COMPRESSION);
+ }
+ }
+
+ // Default is -1 (no limit); setter round-trips an explicit byte limit.
+ @Test
+ public void outputFileSizeLimit() {
+ final long mb250 = 1024 * 1024 * 250;
+ try (final CompactionOptions compactionOptions = new CompactionOptions()) {
+ assertThat(compactionOptions.outputFileSizeLimit())
+ .isEqualTo(-1);
+ compactionOptions.setOutputFileSizeLimit(mb250);
+ assertThat(compactionOptions.outputFileSizeLimit())
+ .isEqualTo(mb250);
+ }
+ }
+
+ // Default is 0 (unlimited subcompactions); setter round-trips.
+ @Test
+ public void maxSubcompactions() {
+ try (final CompactionOptions compactionOptions = new CompactionOptions()) {
+ assertThat(compactionOptions.maxSubcompactions())
+ .isEqualTo(0);
+ compactionOptions.setMaxSubcompactions(9);
+ assertThat(compactionOptions.maxSubcompactions())
+ .isEqualTo(9);
+ }
+ }
+}
assertThat(opt.maxDictBytes()).isEqualTo(maxDictBytes);
}
}
+
+ @Test
+ public void zstdMaxTrainBytes() {
+ final int zstdMaxTrainBytes = 999;
+ try(final CompressionOptions opt = new CompressionOptions()) {
+ opt.setZStdMaxTrainBytes(zstdMaxTrainBytes);
+ assertThat(opt.zstdMaxTrainBytes()).isEqualTo(zstdMaxTrainBytes);
+ }
+ }
+
+ @Test
+ public void enabled() {
+ try(final CompressionOptions opt = new CompressionOptions()) {
+ assertThat(opt.enabled()).isFalse();
+ opt.setEnabled(true);
+ assertThat(opt.enabled()).isTrue();
+ }
+ }
}
}
}
+ @Test
+ public void enablePipelinedWrite() {
+ try(final DBOptions opt = new DBOptions()) {
+ assertThat(opt.enablePipelinedWrite()).isFalse();
+ opt.setEnablePipelinedWrite(true);
+ assertThat(opt.enablePipelinedWrite()).isTrue();
+ }
+ }
+
@Test
public void allowConcurrentMemtableWrite() {
try (final DBOptions opt = new DBOptions()) {
}
}
+ @Test
+ public void walFilter() {
+ try (final DBOptions opt = new DBOptions()) {
+ assertThat(opt.walFilter()).isNull();
+
+ try (final AbstractWalFilter walFilter = new AbstractWalFilter() {
+ @Override
+ public void columnFamilyLogNumberMap(
+ final Map<Integer, Long> cfLognumber,
+ final Map<String, Integer> cfNameId) {
+ // no-op
+ }
+
+ @Override
+ public LogRecordFoundResult logRecordFound(final long logNumber,
+ final String logFileName, final WriteBatch batch,
+ final WriteBatch newBatch) {
+ return new LogRecordFoundResult(
+ WalProcessingOption.CONTINUE_PROCESSING, false);
+ }
+
+ @Override
+ public String name() {
+ return "test-wal-filter";
+ }
+ }) {
+ opt.setWalFilter(walFilter);
+ assertThat(opt.walFilter()).isEqualTo(walFilter);
+ }
+ }
+ }
+
@Test
public void failIfOptionsFileError() {
try (final DBOptions opt = new DBOptions()) {
}
}
+ // The five tests below all follow the same pattern: each DBOptions boolean
+ // flag defaults to false and its setter round-trips to true.
+ @Test
+ public void allowIngestBehind() {
+ try (final DBOptions opt = new DBOptions()) {
+ assertThat(opt.allowIngestBehind()).isFalse();
+ opt.setAllowIngestBehind(true);
+ assertThat(opt.allowIngestBehind()).isTrue();
+ }
+ }
+
+ @Test
+ public void preserveDeletes() {
+ try (final DBOptions opt = new DBOptions()) {
+ assertThat(opt.preserveDeletes()).isFalse();
+ opt.setPreserveDeletes(true);
+ assertThat(opt.preserveDeletes()).isTrue();
+ }
+ }
+
+ @Test
+ public void twoWriteQueues() {
+ try (final DBOptions opt = new DBOptions()) {
+ assertThat(opt.twoWriteQueues()).isFalse();
+ opt.setTwoWriteQueues(true);
+ assertThat(opt.twoWriteQueues()).isTrue();
+ }
+ }
+
+ @Test
+ public void manualWalFlush() {
+ try (final DBOptions opt = new DBOptions()) {
+ assertThat(opt.manualWalFlush()).isFalse();
+ opt.setManualWalFlush(true);
+ assertThat(opt.manualWalFlush()).isTrue();
+ }
+ }
+
+ @Test
+ public void atomicFlush() {
+ try (final DBOptions opt = new DBOptions()) {
+ assertThat(opt.atomicFlush()).isFalse();
+ opt.setAtomicFlush(true);
+ assertThat(opt.atomicFlush()).isTrue();
+ }
+ }
+
@Test
public void rateLimiter() {
try(final DBOptions options = new DBOptions();
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.Collection;
+import java.util.List;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+// Tests for the default Env's thread-pool management APIs
+// (background threads, queue length, priority lowering, thread listing).
+public class DefaultEnvTest {
+
+ @ClassRule
+ public static final RocksMemoryResource rocksMemoryResource =
+ new RocksMemoryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ // setBackgroundThreads without a priority applies to the LOW pool
+ // (shown by the second pair of statements).
+ @Test
+ public void backgroundThreads() {
+ try (final Env defaultEnv = RocksEnv.getDefault()) {
+ defaultEnv.setBackgroundThreads(5, Priority.BOTTOM);
+ assertThat(defaultEnv.getBackgroundThreads(Priority.BOTTOM)).isEqualTo(5);
+
+ defaultEnv.setBackgroundThreads(5);
+ assertThat(defaultEnv.getBackgroundThreads(Priority.LOW)).isEqualTo(5);
+
+ defaultEnv.setBackgroundThreads(5, Priority.LOW);
+ assertThat(defaultEnv.getBackgroundThreads(Priority.LOW)).isEqualTo(5);
+
+ defaultEnv.setBackgroundThreads(5, Priority.HIGH);
+ assertThat(defaultEnv.getBackgroundThreads(Priority.HIGH)).isEqualTo(5);
+ }
+ }
+
+ // With no work scheduled, every pool's queue should be empty.
+ @Test
+ public void threadPoolQueueLen() {
+ try (final Env defaultEnv = RocksEnv.getDefault()) {
+ assertThat(defaultEnv.getThreadPoolQueueLen(Priority.BOTTOM)).isEqualTo(0);
+ assertThat(defaultEnv.getThreadPoolQueueLen(Priority.LOW)).isEqualTo(0);
+ assertThat(defaultEnv.getThreadPoolQueueLen(Priority.HIGH)).isEqualTo(0);
+ }
+ }
+
+ // incBackgroundThreadsIfNeeded only grows a pool; 20 exceeds the defaults,
+ // so each pool should report exactly 20 afterwards.
+ @Test
+ public void incBackgroundThreadsIfNeeded() {
+ try (final Env defaultEnv = RocksEnv.getDefault()) {
+ defaultEnv.incBackgroundThreadsIfNeeded(20, Priority.BOTTOM);
+ assertThat(defaultEnv.getBackgroundThreads(Priority.BOTTOM)).isEqualTo(20);
+
+ defaultEnv.incBackgroundThreadsIfNeeded(20, Priority.LOW);
+ assertThat(defaultEnv.getBackgroundThreads(Priority.LOW)).isEqualTo(20);
+
+ defaultEnv.incBackgroundThreadsIfNeeded(20, Priority.HIGH);
+ assertThat(defaultEnv.getBackgroundThreads(Priority.HIGH)).isEqualTo(20);
+ }
+ }
+
+ // Smoke test: lowering IO priority must not throw for any pool
+ // (no observable state is asserted here).
+ @Test
+ public void lowerThreadPoolIOPriority() {
+ try (final Env defaultEnv = RocksEnv.getDefault()) {
+ defaultEnv.lowerThreadPoolIOPriority(Priority.BOTTOM);
+
+ defaultEnv.lowerThreadPoolIOPriority(Priority.LOW);
+
+ defaultEnv.lowerThreadPoolIOPriority(Priority.HIGH);
+ }
+ }
+
+ // Smoke test: lowering CPU priority must not throw for any pool.
+ @Test
+ public void lowerThreadPoolCPUPriority() {
+ try (final Env defaultEnv = RocksEnv.getDefault()) {
+ defaultEnv.lowerThreadPoolCPUPriority(Priority.BOTTOM);
+
+ defaultEnv.lowerThreadPoolCPUPriority(Priority.LOW);
+
+ defaultEnv.lowerThreadPoolCPUPriority(Priority.HIGH);
+ }
+ }
+
+ // The default Env always has at least one background thread to report.
+ @Test
+ public void threadList() throws RocksDBException {
+ try (final Env defaultEnv = RocksEnv.getDefault()) {
+ final Collection<ThreadStatus> threadList = defaultEnv.getThreadList();
+ assertThat(threadList.size()).isGreaterThan(0);
+ }
+ }
+
+ // Same check, but with a live database bound to the Env.
+ @Test
+ public void threadList_integration() throws RocksDBException {
+ try (final Env env = RocksEnv.getDefault();
+ final Options opt = new Options()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true)
+ .setEnv(env)) {
+ // open database
+ try (final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath())) {
+
+ final List<ThreadStatus> threadList = env.getThreadList();
+ assertThat(threadList.size()).isGreaterThan(0);
+ }
+ }
+ }
+}
public static final Random rand = PlatformRandomHelper.getPlatformSpecificRandomFactory();
+ // EnvOptions(DBOptions) must copy settings from the DBOptions it is built
+ // from — verified here via compactionReadaheadSize.
+ @Test
+ public void dbOptionsConstructor() {
+ final long compactionReadaheadSize = 4 * 1024 * 1024;
+ try (final DBOptions dbOptions = new DBOptions()
+ .setCompactionReadaheadSize(compactionReadaheadSize)) {
+ try (final EnvOptions envOptions = new EnvOptions(dbOptions)) {
+ assertThat(envOptions.compactionReadaheadSize())
+ .isEqualTo(compactionReadaheadSize);
+ }
+ }
+ }
+
@Test
public void useMmapReads() {
try (final EnvOptions envOptions = new EnvOptions()) {
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+// Getter/setter round-trip tests for FlushOptions.
+public class FlushOptionsTest {
+
+ // waitForFlush defaults to true (unlike most boolean options here).
+ @Test
+ public void waitForFlush() {
+ try (final FlushOptions flushOptions = new FlushOptions()) {
+ assertThat(flushOptions.waitForFlush()).isTrue();
+ flushOptions.setWaitForFlush(false);
+ assertThat(flushOptions.waitForFlush()).isFalse();
+ }
+ }
+
+ // allowWriteStall defaults to false.
+ @Test
+ public void allowWriteStall() {
+ try (final FlushOptions flushOptions = new FlushOptions()) {
+ assertThat(flushOptions.allowWriteStall()).isFalse();
+ flushOptions.setAllowWriteStall(true);
+ assertThat(flushOptions.allowWriteStall()).isTrue();
+ }
+ }
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+// Both tests assume the native library was built WITHOUT HDFS support, so
+// constructing an HdfsEnv is expected to fail with RocksDBException.
+public class HdfsEnvTest {
+
+ @ClassRule
+ public static final RocksMemoryResource rocksMemoryResource =
+ new RocksMemoryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ // expect org.rocksdb.RocksDBException: Not compiled with hdfs support
+ @Test(expected = RocksDBException.class)
+ public void construct() throws RocksDBException {
+ try (final Env env = new HdfsEnv("hdfs://localhost:5000")) {
+ // no-op
+ }
+ }
+
+ // expect org.rocksdb.RocksDBException: Not compiled with hdfs support
+ @Test(expected = RocksDBException.class)
+ public void construct_integration() throws RocksDBException {
+ try (final Env env = new HdfsEnv("hdfs://localhost:5000");
+ final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setEnv(env);
+ ) {
+ try (final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getPath())) {
+ db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ }
+ }
+ }
+}
assertThat(options.allowBlockingFlush()).isEqualTo(allowBlockingFlush);
}
}
+
+ // ingestBehind defaults to false; verify the toggle round-trips.
+ @Test
+ public void ingestBehind() {
+ try (final IngestExternalFileOptions options =
+ new IngestExternalFileOptions()) {
+ assertThat(options.ingestBehind()).isFalse();
+ options.setIngestBehind(true);
+ assertThat(options.ingestBehind()).isTrue();
+ }
+ }
+
+ // writeGlobalSeqno defaults to true; verify it can be turned off.
+ @Test
+ public void writeGlobalSeqno() {
+ try (final IngestExternalFileOptions options =
+ new IngestExternalFileOptions()) {
+ assertThat(options.writeGlobalSeqno()).isTrue();
+ options.setWriteGlobalSeqno(false);
+ assertThat(options.writeGlobalSeqno()).isFalse();
+ }
+ }
}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import org.junit.Test;
+import org.rocksdb.MutableDBOptions.MutableDBOptionsBuilder;
+
+import java.util.NoSuchElementException;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+// Tests for MutableDBOptions: builder round-trips, build() key/value
+// extraction, and string serialisation/parsing.
+public class MutableDBOptionsTest {
+
+ // Builder setters must be readable back before build() is called.
+ @Test
+ public void builder() {
+ final MutableDBOptionsBuilder builder =
+ MutableDBOptions.builder();
+ builder
+ .setBytesPerSync(1024 * 1024 * 7)
+ .setMaxBackgroundJobs(5)
+ .setAvoidFlushDuringShutdown(false);
+
+ assertThat(builder.bytesPerSync()).isEqualTo(1024 * 1024 * 7);
+ assertThat(builder.maxBackgroundJobs()).isEqualTo(5);
+ assertThat(builder.avoidFlushDuringShutdown()).isEqualTo(false);
+ }
+
+ // Reading an option that was never set must throw NoSuchElementException.
+ @Test(expected = NoSuchElementException.class)
+ public void builder_getWhenNotSet() {
+ final MutableDBOptionsBuilder builder =
+ MutableDBOptions.builder();
+
+ builder.bytesPerSync();
+ }
+
+ // build() exposes only the options that were explicitly set, as parallel
+ // key and (string) value arrays; 7340032 == 1024 * 1024 * 7.
+ @Test
+ public void builder_build() {
+ final MutableDBOptions options = MutableDBOptions
+ .builder()
+ .setBytesPerSync(1024 * 1024 * 7)
+ .setMaxBackgroundJobs(5)
+ .build();
+
+ assertThat(options.getKeys().length).isEqualTo(2);
+ assertThat(options.getValues().length).isEqualTo(2);
+ assertThat(options.getKeys()[0])
+ .isEqualTo(
+ MutableDBOptions.DBOption.bytes_per_sync.name());
+ assertThat(options.getValues()[0]).isEqualTo("7340032");
+ assertThat(options.getKeys()[1])
+ .isEqualTo(
+ MutableDBOptions.DBOption.max_background_jobs.name());
+ assertThat(options.getValues()[1]).isEqualTo("5");
+ }
+
+ // toString() serialises set options as "key=value" pairs joined by ';'.
+ @Test
+ public void mutableDBOptions_toString() {
+ final String str = MutableDBOptions
+ .builder()
+ .setMaxOpenFiles(99)
+ .setDelayedWriteRate(789)
+ .setAvoidFlushDuringShutdown(true)
+ .build()
+ .toString();
+
+ assertThat(str).isEqualTo("max_open_files=99;delayed_write_rate=789;"
+ + "avoid_flush_during_shutdown=true");
+ }
+
+ // parse() is the inverse of toString(): the builder must reflect every
+ // option present in the serialised string.
+ @Test
+ public void mutableDBOptions_parse() {
+ final String str = "max_open_files=99;delayed_write_rate=789;"
+ + "avoid_flush_during_shutdown=true";
+
+ final MutableDBOptionsBuilder builder =
+ MutableDBOptions.parse(str);
+
+ assertThat(builder.maxOpenFiles()).isEqualTo(99);
+ assertThat(builder.delayedWriteRate()).isEqualTo(789);
+ assertThat(builder.avoidFlushDuringShutdown()).isEqualTo(true);
+ }
+}
package org.rocksdb;
import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Random;
+import java.util.*;
import org.junit.ClassRule;
import org.junit.Test;
}
}
+ // Pipelined write is off by default on Options; verify the toggle round-trips.
+ @Test
+ public void enablePipelinedWrite() {
+ try(final Options opt = new Options()) {
+ assertThat(opt.enablePipelinedWrite()).isFalse();
+ opt.setEnablePipelinedWrite(true);
+ assertThat(opt.enablePipelinedWrite()).isTrue();
+ }
+ }
+
@Test
public void allowConcurrentMemtableWrite() {
try (final Options opt = new Options()) {
}
}
+ // Mirrors the DBOptions walFilter test for the combined Options class:
+ // null by default, and a set filter instance is retrievable again.
+ @Test
+ public void walFilter() {
+ try (final Options opt = new Options()) {
+ assertThat(opt.walFilter()).isNull();
+
+ // Minimal pass-through filter: keeps every record unchanged.
+ try (final AbstractWalFilter walFilter = new AbstractWalFilter() {
+ @Override
+ public void columnFamilyLogNumberMap(
+ final Map<Integer, Long> cfLognumber,
+ final Map<String, Integer> cfNameId) {
+ // no-op
+ }
+
+ @Override
+ public LogRecordFoundResult logRecordFound(final long logNumber,
+ final String logFileName, final WriteBatch batch,
+ final WriteBatch newBatch) {
+ return new LogRecordFoundResult(
+ WalProcessingOption.CONTINUE_PROCESSING, false);
+ }
+
+ @Override
+ public String name() {
+ return "test-wal-filter";
+ }
+ }) {
+ opt.setWalFilter(walFilter);
+ assertThat(opt.walFilter()).isEqualTo(walFilter);
+ }
+ }
+ }
+
@Test
public void failIfOptionsFileError() {
try (final Options opt = new Options()) {
}
}
+
+ // The five tests below mirror the DBOptions variants for the combined
+ // Options class: each boolean flag defaults to false and round-trips.
+ @Test
+ public void allowIngestBehind() {
+ try (final Options opt = new Options()) {
+ assertThat(opt.allowIngestBehind()).isFalse();
+ opt.setAllowIngestBehind(true);
+ assertThat(opt.allowIngestBehind()).isTrue();
+ }
+ }
+
+ @Test
+ public void preserveDeletes() {
+ try (final Options opt = new Options()) {
+ assertThat(opt.preserveDeletes()).isFalse();
+ opt.setPreserveDeletes(true);
+ assertThat(opt.preserveDeletes()).isTrue();
+ }
+ }
+
+ @Test
+ public void twoWriteQueues() {
+ try (final Options opt = new Options()) {
+ assertThat(opt.twoWriteQueues()).isFalse();
+ opt.setTwoWriteQueues(true);
+ assertThat(opt.twoWriteQueues()).isTrue();
+ }
+ }
+
+ @Test
+ public void manualWalFlush() {
+ try (final Options opt = new Options()) {
+ assertThat(opt.manualWalFlush()).isFalse();
+ opt.setManualWalFlush(true);
+ assertThat(opt.manualWalFlush()).isTrue();
+ }
+ }
+
+ @Test
+ public void atomicFlush() {
+ try (final Options opt = new Options()) {
+ assertThat(opt.atomicFlush()).isFalse();
+ opt.setAtomicFlush(true);
+ assertThat(opt.atomicFlush()).isTrue();
+ }
+ }
+
@Test
public void env() {
try (final Options options = new Options();
}
}
+ // Setting bottommost compression options must store the instance and
+ // preserve its fields (maxDictBytes == 123).
+ @Test
+ public void bottommostCompressionOptions() {
+ try (final Options options = new Options();
+ final CompressionOptions bottommostCompressionOptions = new CompressionOptions()
+ .setMaxDictBytes(123)) {
+
+ options.setBottommostCompressionOptions(bottommostCompressionOptions);
+ assertThat(options.bottommostCompressionOptions())
+ .isEqualTo(bottommostCompressionOptions);
+ assertThat(options.bottommostCompressionOptions().maxDictBytes())
+ .isEqualTo(123);
+ }
+ }
+
@Test
public void compressionOptions() {
try (final Options options = new Options();
}
}
+ // TTL setter/getter round-trip (value is in seconds per the RocksDB API;
+ // 1000 * 60 here is just an arbitrary test value).
+ @Test
+ public void ttl() {
+ try (final Options options = new Options()) {
+ options.setTtl(1000 * 60);
+ assertThat(options.ttl()).
+ isEqualTo(1000 * 60);
+ }
+ }
+
@Test
public void compactionOptionsUniversal() {
try (final Options options = new Options();
@Rule
public ExpectedException exception = ExpectedException.none();
+ // ReadOptions(boolean, boolean) maps to (verifyChecksums, fillCache).
+ @Test
+ public void altConstructor() {
+ try (final ReadOptions opt = new ReadOptions(true, true)) {
+ assertThat(opt.verifyChecksums()).isTrue();
+ assertThat(opt.fillCache()).isTrue();
+ }
+ }
+
+ // The copy constructor must duplicate scalar fields and the iterate
+ // upper/lower bound slices (compared by content, not identity).
+ @Test
+ public void copyConstructor() {
+ try (final ReadOptions opt = new ReadOptions()) {
+ opt.setVerifyChecksums(false);
+ opt.setFillCache(false);
+ opt.setIterateUpperBound(buildRandomSlice());
+ opt.setIterateLowerBound(buildRandomSlice());
+ try (final ReadOptions other = new ReadOptions(opt)) {
+ assertThat(opt.verifyChecksums()).isEqualTo(other.verifyChecksums());
+ assertThat(opt.fillCache()).isEqualTo(other.fillCache());
+ assertThat(Arrays.equals(opt.iterateUpperBound().data(), other.iterateUpperBound().data())).isTrue();
+ assertThat(Arrays.equals(opt.iterateLowerBound().data(), other.iterateLowerBound().data())).isTrue();
+ }
+ }
+ }
+
@Test
public void verifyChecksum() {
try (final ReadOptions opt = new ReadOptions()) {
}
@Test
- public void copyConstructor() {
+ public void tableFilter() {
+ try (final ReadOptions opt = new ReadOptions();
+ final AbstractTableFilter allTablesFilter = new AllTablesFilter()) {
+ opt.setTableFilter(allTablesFilter);
+ }
+ }
+
+ @Test
+ public void iterStartSeqnum() {
try (final ReadOptions opt = new ReadOptions()) {
- opt.setVerifyChecksums(false);
- opt.setFillCache(false);
- opt.setIterateUpperBound(buildRandomSlice());
- opt.setIterateLowerBound(buildRandomSlice());
- ReadOptions other = new ReadOptions(opt);
- assertThat(opt.verifyChecksums()).isEqualTo(other.verifyChecksums());
- assertThat(opt.fillCache()).isEqualTo(other.fillCache());
- assertThat(Arrays.equals(opt.iterateUpperBound().data(), other.iterateUpperBound().data())).isTrue();
- assertThat(Arrays.equals(opt.iterateLowerBound().data(), other.iterateLowerBound().data())).isTrue();
+ assertThat(opt.iterStartSeqnum()).isEqualTo(0);
+
+ opt.setIterStartSeqnum(10);
+ assertThat(opt.iterStartSeqnum()).isEqualTo(10);
}
}
return new Slice(sliceBytes);
}
+ // Trivial table filter that accepts every table; used by the tableFilter test.
+ private static class AllTablesFilter extends AbstractTableFilter {
+ @Override
+ public boolean filter(final TableProperties tableProperties) {
+ return true;
+ }
+ }
}
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
+import org.junit.*;
import org.junit.rules.ExpectedException;
import org.junit.rules.TemporaryFolder;
import java.nio.ByteBuffer;
import java.util.*;
+import static java.nio.charset.StandardCharsets.UTF_8;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.fail;
}
}
+ // Creates a column family via createColumnFamily(), then reopens the DB
+ // listing both families to prove the new family was persisted.
+ @Test
+ public void createColumnFamily() throws RocksDBException {
+ final byte[] col1Name = "col1".getBytes(UTF_8);
+
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+ final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
+ ) {
+ try (final ColumnFamilyHandle col1 =
+ db.createColumnFamily(new ColumnFamilyDescriptor(col1Name, cfOpts))) {
+ assertThat(col1).isNotNull();
+ assertThat(col1.getName()).isEqualTo(col1Name);
+ }
+ }
+
+ // Reopen: the handle list must now contain default + col1.
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(),
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor(col1Name)),
+ cfHandles)) {
+ try {
+ assertThat(cfHandles.size()).isEqualTo(2);
+ assertThat(cfHandles.get(1)).isNotNull();
+ assertThat(cfHandles.get(1).getName()).isEqualTo(col1Name);
+ } finally {
+ for (final ColumnFamilyHandle cfHandle :
+ cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+
+
+ // Creates two column families in one call from a shared ColumnFamilyOptions
+ // plus a list of names, then reopens the DB to prove both were persisted.
+ @Test
+ public void createColumnFamilies() throws RocksDBException {
+ final byte[] col1Name = "col1".getBytes(UTF_8);
+ final byte[] col2Name = "col2".getBytes(UTF_8);
+
+ List<ColumnFamilyHandle> cfHandles;
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+ final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
+ ) {
+ cfHandles =
+ db.createColumnFamilies(cfOpts, Arrays.asList(col1Name, col2Name));
+ try {
+ assertThat(cfHandles).isNotNull();
+ assertThat(cfHandles.size()).isEqualTo(2);
+ assertThat(cfHandles.get(0).getName()).isEqualTo(col1Name);
+ assertThat(cfHandles.get(1).getName()).isEqualTo(col2Name);
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+
+ // Reopen: default + col1 + col2 must all be present.
+ cfHandles = new ArrayList<>();
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(),
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor(col1Name),
+ new ColumnFamilyDescriptor(col2Name)),
+ cfHandles)) {
+ try {
+ assertThat(cfHandles.size()).isEqualTo(3);
+ assertThat(cfHandles.get(1)).isNotNull();
+ assertThat(cfHandles.get(1).getName()).isEqualTo(col1Name);
+ assertThat(cfHandles.get(2)).isNotNull();
+ assertThat(cfHandles.get(2).getName()).isEqualTo(col2Name);
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+
+ // Same as createColumnFamilies(), but using the descriptor-list overload.
+ // (Renamed from createColumnFamiliesfromDescriptors to follow camelCase.)
+ @Test
+ public void createColumnFamiliesFromDescriptors() throws RocksDBException {
+ final byte[] col1Name = "col1".getBytes(UTF_8);
+ final byte[] col2Name = "col2".getBytes(UTF_8);
+
+ List<ColumnFamilyHandle> cfHandles;
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+ final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
+ ) {
+ cfHandles =
+ db.createColumnFamilies(Arrays.asList(
+ new ColumnFamilyDescriptor(col1Name, cfOpts),
+ new ColumnFamilyDescriptor(col2Name, cfOpts)));
+ try {
+ assertThat(cfHandles).isNotNull();
+ assertThat(cfHandles.size()).isEqualTo(2);
+ assertThat(cfHandles.get(0).getName()).isEqualTo(col1Name);
+ assertThat(cfHandles.get(1).getName()).isEqualTo(col2Name);
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+
+ // Reopen: default + col1 + col2 must all be present.
+ cfHandles = new ArrayList<>();
+ try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(),
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor(col1Name),
+ new ColumnFamilyDescriptor(col2Name)),
+ cfHandles)) {
+ try {
+ assertThat(cfHandles.size()).isEqualTo(3);
+ assertThat(cfHandles.get(1)).isNotNull();
+ assertThat(cfHandles.get(1).getName()).isEqualTo(col1Name);
+ assertThat(cfHandles.get(2)).isNotNull();
+ assertThat(cfHandles.get(2).getName()).isEqualTo(col2Name);
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+
@Test
public void put() throws RocksDBException {
try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
}
}
}
+
+ // getApproximateSizes over two adjacent key ranges: with data only in the
+ // memtable, the file-based estimate for [key1,key2) is 0, while including
+ // memtables for [key2,key3) yields a non-zero size.
+ // (Array declarations changed from C-style `byte key1[]` to Java-style
+ // `byte[] key1`.)
+ @Test
+ public void getApproximateSizes() throws RocksDBException {
+ final byte[] key1 = "key1".getBytes(UTF_8);
+ final byte[] key2 = "key2".getBytes(UTF_8);
+ final byte[] key3 = "key3".getBytes(UTF_8);
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.put(key1, key1);
+ db.put(key2, key2);
+ db.put(key3, key3);
+
+ final long[] sizes = db.getApproximateSizes(
+ Arrays.asList(
+ new Range(new Slice(key1), new Slice(key2)),
+ new Range(new Slice(key2), new Slice(key3))
+ ),
+ SizeApproximationFlag.INCLUDE_FILES,
+ SizeApproximationFlag.INCLUDE_MEMTABLES);
+
+ assertThat(sizes.length).isEqualTo(2);
+ assertThat(sizes[0]).isEqualTo(0);
+ assertThat(sizes[1]).isGreaterThanOrEqualTo(1);
+ }
+ }
+ }
+
+ // getApproximateMemTableStats over [key1, key3) after three unflushed puts
+ // must report more than one entry and a non-trivial size.
+ // (Array declarations changed from C-style `byte key1[]` to Java-style
+ // `byte[] key1`.)
+ @Test
+ public void getApproximateMemTableStats() throws RocksDBException {
+ final byte[] key1 = "key1".getBytes(UTF_8);
+ final byte[] key2 = "key2".getBytes(UTF_8);
+ final byte[] key3 = "key3".getBytes(UTF_8);
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.put(key1, key1);
+ db.put(key2, key2);
+ db.put(key3, key3);
+
+ final RocksDB.CountAndSize stats =
+ db.getApproximateMemTableStats(
+ new Range(new Slice(key1), new Slice(key3)));
+
+ assertThat(stats).isNotNull();
+ assertThat(stats.count).isGreaterThan(1);
+ assertThat(stats.size).isGreaterThan(1);
+ }
+ }
+ }
+
+ @Ignore("TODO(AR) re-enable when ready!")
+ // End-to-end compactFiles() test: create a CF, fill it with enough random
+ // values to produce multiple L0 files, flush, then manually compact the
+ // live files to level 1 and expect a non-empty result list.
+ @Test
+ public void compactFiles() throws RocksDBException {
+ final int kTestKeySize = 16;
+ final int kTestValueSize = 984;
+ final int kEntrySize = kTestKeySize + kTestValueSize;
+ final int kEntriesPerBuffer = 100;
+ final int writeBufferSize = kEntrySize * kEntriesPerBuffer;
+ final byte[] cfName = "pikachu".getBytes(UTF_8);
+
+ try (final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setWriteBufferSize(writeBufferSize)
+ .setCompactionStyle(CompactionStyle.LEVEL)
+ .setTargetFileSizeBase(writeBufferSize)
+ .setMaxBytesForLevelBase(writeBufferSize * 2)
+ .setLevel0StopWritesTrigger(2)
+ .setMaxBytesForLevelMultiplier(2)
+ .setCompressionType(CompressionType.NO_COMPRESSION)
+ .setMaxSubcompactions(4)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ // First open: create the extra column family, then close immediately.
+ try (final RocksDB db = RocksDB.open(options, dbPath);
+ final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions(options)) {
+ db.createColumnFamily(new ColumnFamilyDescriptor(cfName,
+ cfOptions)).close();
+ }
+
+ // Second open: list both families and perform the writes + compaction.
+ try (final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions(options)) {
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOptions),
+ new ColumnFamilyDescriptor(cfName, cfOptions)
+ );
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+ try (final DBOptions dbOptions = new DBOptions(options);
+ final RocksDB db = RocksDB.open(dbOptions, dbPath, cfDescriptors,
+ cfHandles);
+ ) {
+ try (final FlushOptions flushOptions = new FlushOptions()
+ .setWaitForFlush(true)
+ .setAllowWriteStall(true);
+ final CompactionOptions compactionOptions = new CompactionOptions()) {
+ // Fixed seed keeps the generated values deterministic.
+ final Random rnd = new Random(301);
+ for (int key = 64 * kEntriesPerBuffer; key >= 0; --key) {
+ final byte[] value = new byte[kTestValueSize];
+ rnd.nextBytes(value);
+ db.put(cfHandles.get(1), Integer.toString(key).getBytes(UTF_8),
+ value);
+ }
+ db.flush(flushOptions, cfHandles);
+
+ final RocksDB.LiveFiles liveFiles = db.getLiveFiles();
+ final List<String> compactedFiles =
+ db.compactFiles(compactionOptions, cfHandles.get(1),
+ liveFiles.files, 1, -1, null);
+ assertThat(compactedFiles).isNotEmpty();
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Smoke test: enableAutoCompaction() on the default column family must not
+ // throw (no observable state is asserted).
+ @Test
+ public void enableAutoCompaction() throws RocksDBException {
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)) {
+ final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
+ );
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
+ try {
+ db.enableAutoCompaction(cfHandles);
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ // The four tests below check scalar getters of a freshly-created DB against
+ // the values the asserts pin (7 levels, max-mem-compaction-level 0,
+ // level0 stop-write trigger 36, and getName() == the DB path).
+ @Test
+ public void numberLevels() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ assertThat(db.numberLevels()).isEqualTo(7);
+ }
+ }
+ }
+
+ @Test
+ public void maxMemCompactionLevel() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ assertThat(db.maxMemCompactionLevel()).isEqualTo(0);
+ }
+ }
+ }
+
+ @Test
+ public void level0StopWriteTrigger() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ assertThat(db.level0StopWriteTrigger()).isEqualTo(36);
+ }
+ }
+ }
+
+ @Test
+ public void getName() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ assertThat(db.getName()).isEqualTo(dbPath);
+ }
+ }
+ }
+
+ // getEnv() of a DB opened without an explicit Env must be the default Env.
+ @Test
+ public void getEnv() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ assertThat(db.getEnv()).isEqualTo(Env.getDefault());
+ }
+ }
+ }
+
+ // The three smoke tests below only verify that flush(), flushWal(true) and
+ // syncWal() complete without throwing on an empty DB.
+ @Test
+ public void flush() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath);
+ final FlushOptions flushOptions = new FlushOptions()) {
+ db.flush(flushOptions);
+ }
+ }
+ }
+
+ @Test
+ public void flushWal() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.flushWal(true);
+ }
+ }
+ }
+
+ @Test
+ public void syncWal() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.syncWal();
+ }
+ }
+ }
+
+ // setPreserveDeletesSequenceNumber returns false here — presumably because
+ // preserve_deletes was not enabled on this DB; confirm against the API docs.
+ @Test
+ public void setPreserveDeletesSequenceNumber() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ assertThat(db.setPreserveDeletesSequenceNumber(db.getLatestSequenceNumber()))
+ .isFalse();
+ }
+ }
+ }
+
+ // NOTE(review): the exact manifest size (13) and file names asserted below
+ // are brittle — they depend on the RocksDB version's on-disk layout.
+ @Test
+ public void getLiveFiles() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ final RocksDB.LiveFiles livefiles = db.getLiveFiles(true);
+ assertThat(livefiles).isNotNull();
+ assertThat(livefiles.manifestFileSize).isEqualTo(13);
+ assertThat(livefiles.files.size()).isEqualTo(3);
+ assertThat(livefiles.files.get(0)).isEqualTo("/CURRENT");
+ assertThat(livefiles.files.get(1)).isEqualTo("/MANIFEST-000001");
+ assertThat(livefiles.files.get(2)).isEqualTo("/OPTIONS-000005");
+ }
+ }
+ }
+
+ // After one unflushed put, exactly one WAL file should exist and be alive.
+ @Test
+ public void getSortedWalFiles() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ final List<LogFile> logFiles = db.getSortedWalFiles();
+ assertThat(logFiles).isNotNull();
+ assertThat(logFiles.size()).isEqualTo(1);
+ assertThat(logFiles.get(0).type())
+ .isEqualTo(WalFileType.kAliveLogFile);
+ }
+ }
+ }
+
+ // deleteFile() on a name that is not a live file must be a harmless no-op.
+ @Test
+ public void deleteFile() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.deleteFile("unknown");
+ }
+ }
+ }
+
+ // With the single put still in the memtable (nothing flushed), there are
+ // no SST files, so the live-file metadata list is empty.
+ @Test
+ public void getLiveFilesMetaData() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ final List<LiveFileMetaData> liveFilesMetaData
+ = db.getLiveFilesMetaData();
+ assertThat(liveFilesMetaData).isEmpty();
+ }
+ }
+ }
+
+ // Column family metadata for the default CF: correct name and the default
+ // seven levels.
+ @Test
+ public void getColumnFamilyMetaData() throws RocksDBException {
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)) {
+ final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
+ );
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
+ db.put(cfHandles.get(0), "key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ try {
+ final ColumnFamilyMetaData cfMetadata =
+ db.getColumnFamilyMetaData(cfHandles.get(0));
+ assertThat(cfMetadata).isNotNull();
+ assertThat(cfMetadata.name()).isEqualTo(RocksDB.DEFAULT_COLUMN_FAMILY);
+ assertThat(cfMetadata.levels().size()).isEqualTo(7);
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ // Smoke test: verifyChecksum() must complete without throwing.
+ @Test
+ public void verifyChecksum() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.verifyChecksum();
+ }
+ }
+ }
+
+ // getPropertiesOfAllTables must return a non-null map for the default CF.
+ @Test
+ public void getPropertiesOfAllTables() throws RocksDBException {
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)) {
+ final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
+ );
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
+ db.put(cfHandles.get(0), "key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ try {
+ final Map<String, TableProperties> properties =
+ db.getPropertiesOfAllTables(cfHandles.get(0));
+ assertThat(properties).isNotNull();
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void getPropertiesOfTablesInRange() throws RocksDBException {
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)) {
+ final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
+ );
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
+ db.put(cfHandles.get(0), "key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ db.put(cfHandles.get(0), "key2".getBytes(UTF_8), "value2".getBytes(UTF_8));
+ db.put(cfHandles.get(0), "key3".getBytes(UTF_8), "value3".getBytes(UTF_8));
+ try {
+ final Range range = new Range(
+ new Slice("key1".getBytes(UTF_8)),
+ new Slice("key3".getBytes(UTF_8)));
+ final Map<String, TableProperties> properties =
+ db.getPropertiesOfTablesInRange(
+ cfHandles.get(0), Arrays.asList(range));
+ assertThat(properties).isNotNull();
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void suggestCompactRange() throws RocksDBException {
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)) {
+ final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
+ );
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
+ db.put(cfHandles.get(0), "key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ db.put(cfHandles.get(0), "key2".getBytes(UTF_8), "value2".getBytes(UTF_8));
+ db.put(cfHandles.get(0), "key3".getBytes(UTF_8), "value3".getBytes(UTF_8));
+ try {
+ final Range range = db.suggestCompactRange(cfHandles.get(0));
+ assertThat(range).isNotNull();
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void promoteL0() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ db.promoteL0(2);
+ }
+ }
+ }
+
+ @Test
+ public void startTrace() throws RocksDBException {
+ try (final Options options = new Options().setCreateIfMissing(true)) {
+ final String dbPath = dbFolder.getRoot().getAbsolutePath();
+ try (final RocksDB db = RocksDB.open(options, dbPath)) {
+ final TraceOptions traceOptions = new TraceOptions();
+
+ try (final InMemoryTraceWriter traceWriter = new InMemoryTraceWriter()) {
+ db.startTrace(traceOptions, traceWriter);
+
+ db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+
+ db.endTrace();
+
+ final List<byte[]> writes = traceWriter.getWrites();
+ assertThat(writes.size()).isGreaterThan(0);
+ }
+ }
+ }
+ }
+
+ @Test
+ public void setDBOptions() throws RocksDBException {
+ try (final DBOptions options = new DBOptions()
+ .setCreateIfMissing(true)
+ .setCreateMissingColumnFamilies(true);
+ final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions()
+ .setWriteBufferSize(4096)) {
+
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts));
+
+ // open database
+ final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+ try (final RocksDB db = RocksDB.open(options,
+ dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, columnFamilyHandles)) {
+ try {
+ final MutableDBOptions mutableOptions =
+ MutableDBOptions.builder()
+ .setBytesPerSync(1024 * 1027 * 7)
+ .setAvoidFlushDuringShutdown(false)
+ .build();
+
+ db.setDBOptions(mutableOptions);
+ } finally {
+ for (final ColumnFamilyHandle handle : columnFamilyHandles) {
+ handle.close();
+ }
+ }
+ }
+ }
+ }
+
+ private static class InMemoryTraceWriter extends AbstractTraceWriter {
+ private final List<byte[]> writes = new ArrayList<>();
+ private volatile boolean closed = false;
+
+ @Override
+ public void write(final Slice slice) {
+ if (closed) {
+ return;
+ }
+ final byte[] data = slice.data();
+ final byte[] dataCopy = new byte[data.length];
+ System.arraycopy(data, 0, dataCopy, 0, data.length);
+ writes.add(dataCopy);
+ }
+
+ @Override
+ public void closeWriter() {
+ closed = true;
+ }
+
+ @Override
+ public long getFileSize() {
+ long size = 0;
+ for (int i = 0; i < writes.size(); i++) {
+ size += writes.get(i).length;
+ }
+ return size;
+ }
+
+ public List<byte[]> getWrites() {
+ return writes;
+ }
+ }
}
+++ /dev/null
-// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under both the GPLv2 (found in the
-// COPYING file in the root directory) and Apache 2.0 License
-// (found in the LICENSE.Apache file in the root directory).
-
-package org.rocksdb;
-
-import org.junit.ClassRule;
-import org.junit.Test;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class RocksEnvTest {
-
- @ClassRule
- public static final RocksMemoryResource rocksMemoryResource =
- new RocksMemoryResource();
-
- @Test
- public void rocksEnv() {
- try (final Env rocksEnv = RocksEnv.getDefault()) {
- rocksEnv.setBackgroundThreads(5);
- // default rocksenv will always return zero for flush pool
- // no matter what was set via setBackgroundThreads
- assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.FLUSH_POOL)).
- isEqualTo(0);
- rocksEnv.setBackgroundThreads(5, RocksEnv.FLUSH_POOL);
- // default rocksenv will always return zero for flush pool
- // no matter what was set via setBackgroundThreads
- assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.FLUSH_POOL)).
- isEqualTo(0);
- rocksEnv.setBackgroundThreads(5, RocksEnv.COMPACTION_POOL);
- // default rocksenv will always return zero for compaction pool
- // no matter what was set via setBackgroundThreads
- assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.COMPACTION_POOL)).
- isEqualTo(0);
- }
- }
-}
"baz".getBytes()
};
- try (final Env env = new RocksMemEnv();
+ try (final Env env = new RocksMemEnv(Env.getDefault());
final Options options = new Options()
.setCreateIfMissing(true)
.setEnv(env);
"baz".getBytes()
};
- try (final Env env = new RocksMemEnv();
+ try (final Env env = new RocksMemEnv(Env.getDefault());
final Options options = new Options()
.setCreateIfMissing(true)
.setEnv(env);
@Test(expected = RocksDBException.class)
public void createIfMissingFalse() throws RocksDBException {
- try (final Env env = new RocksMemEnv();
+ try (final Env env = new RocksMemEnv(Env.getDefault());
final Options options = new Options()
.setCreateIfMissing(false)
.setEnv(env);
--- /dev/null
+package org.rocksdb;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class TableFilterTest {
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void readOptions() throws RocksDBException {
+ try (final DBOptions opt = new DBOptions().
+ setCreateIfMissing(true).
+ setCreateMissingColumnFamilies(true);
+ final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions()
+ ) {
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+ Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts)
+ );
+
+ final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+
+ // open database
+ try (final RocksDB db = RocksDB.open(opt,
+ dbFolder.getRoot().getAbsolutePath(),
+ columnFamilyDescriptors,
+ columnFamilyHandles)) {
+
+ try (final CfNameCollectionTableFilter cfNameCollectingTableFilter =
+ new CfNameCollectionTableFilter();
+ final FlushOptions flushOptions =
+ new FlushOptions().setWaitForFlush(true);
+ final ReadOptions readOptions =
+ new ReadOptions().setTableFilter(cfNameCollectingTableFilter)) {
+
+ db.put(columnFamilyHandles.get(0),
+ "key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ db.put(columnFamilyHandles.get(0),
+ "key2".getBytes(UTF_8), "value2".getBytes(UTF_8));
+ db.put(columnFamilyHandles.get(0),
+ "key3".getBytes(UTF_8), "value3".getBytes(UTF_8));
+ db.put(columnFamilyHandles.get(1),
+ "key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ db.put(columnFamilyHandles.get(1),
+ "key2".getBytes(UTF_8), "value2".getBytes(UTF_8));
+ db.put(columnFamilyHandles.get(1),
+ "key3".getBytes(UTF_8), "value3".getBytes(UTF_8));
+
+ db.flush(flushOptions, columnFamilyHandles);
+
+ try (final RocksIterator iterator =
+ db.newIterator(columnFamilyHandles.get(0), readOptions)) {
+ iterator.seekToFirst();
+ while (iterator.isValid()) {
+ iterator.key();
+ iterator.value();
+ iterator.next();
+ }
+ }
+
+ try (final RocksIterator iterator =
+ db.newIterator(columnFamilyHandles.get(1), readOptions)) {
+ iterator.seekToFirst();
+ while (iterator.isValid()) {
+ iterator.key();
+ iterator.value();
+ iterator.next();
+ }
+ }
+
+ assertThat(cfNameCollectingTableFilter.cfNames.size()).isEqualTo(2);
+ assertThat(cfNameCollectingTableFilter.cfNames.get(0))
+ .isEqualTo(RocksDB.DEFAULT_COLUMN_FAMILY);
+ assertThat(cfNameCollectingTableFilter.cfNames.get(1))
+ .isEqualTo("new_cf".getBytes(UTF_8));
+ } finally {
+ for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) {
+ columnFamilyHandle.close();
+ }
+ }
+ }
+ }
+ }
+
+ private static class CfNameCollectionTableFilter extends AbstractTableFilter {
+ private final List<byte[]> cfNames = new ArrayList<>();
+
+ @Override
+ public boolean filter(final TableProperties tableProperties) {
+ cfNames.add(tableProperties.getColumnFamilyName());
+ return true;
+ }
+ }
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+public class TimedEnvTest {
+
+ @ClassRule
+ public static final RocksMemoryResource rocksMemoryResource =
+ new RocksMemoryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void construct() throws RocksDBException {
+ try (final Env env = new TimedEnv(Env.getDefault())) {
+ // no-op
+ }
+ }
+
+ @Test
+ public void construct_integration() throws RocksDBException {
+ try (final Env env = new TimedEnv(Env.getDefault());
+ final Options options = new Options()
+ .setCreateIfMissing(true)
+ .setEnv(env);
+ ) {
+ try (final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getPath())) {
+ db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+ }
+ }
+ }
+}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.rocksdb.util.TestUtil.*;
+
+public class WalFilterTest {
+
+ @ClassRule
+ public static final RocksMemoryResource rocksMemoryResource =
+ new RocksMemoryResource();
+
+ @Rule
+ public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ @Test
+ public void walFilter() throws RocksDBException {
+ // Create 3 batches with two keys each
+ final byte[][][] batchKeys = {
+ new byte[][] {
+ u("key1"),
+ u("key2")
+ },
+ new byte[][] {
+ u("key3"),
+ u("key4")
+ },
+ new byte[][] {
+ u("key5"),
+ u("key6")
+ }
+
+ };
+
+ final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+ new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ new ColumnFamilyDescriptor(u("pikachu"))
+ );
+ final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+
+ // Test with all WAL processing options
+ for (final WalProcessingOption option : WalProcessingOption.values()) {
+ try (final Options options = optionsForLogIterTest();
+ final DBOptions dbOptions = new DBOptions(options)
+ .setCreateMissingColumnFamilies(true);
+ final RocksDB db = RocksDB.open(dbOptions,
+ dbFolder.getRoot().getAbsolutePath(),
+ cfDescriptors, cfHandles)) {
+ try (final WriteOptions writeOptions = new WriteOptions()) {
+ // Write given keys in given batches
+ for (int i = 0; i < batchKeys.length; i++) {
+ final WriteBatch batch = new WriteBatch();
+ for (int j = 0; j < batchKeys[i].length; j++) {
+ batch.put(cfHandles.get(0), batchKeys[i][j], dummyString(1024));
+ }
+ db.write(writeOptions, batch);
+ }
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ cfHandles.clear();
+ }
+ }
+
+ // Create a test filter that would apply wal_processing_option at
+ // record index 1 (i.e. the second record seen by the filter)
+ final int applyOptionForRecordIndex = 1;
+ try (final TestableWalFilter walFilter =
+ new TestableWalFilter(option, applyOptionForRecordIndex)) {
+
+ try (final Options options = optionsForLogIterTest();
+ final DBOptions dbOptions = new DBOptions(options)
+ .setWalFilter(walFilter)) {
+
+ try (final RocksDB db = RocksDB.open(dbOptions,
+ dbFolder.getRoot().getAbsolutePath(),
+ cfDescriptors, cfHandles)) {
+
+ try {
+ assertThat(walFilter.logNumbers).isNotEmpty();
+ assertThat(walFilter.logFileNames).isNotEmpty();
+ } finally {
+ for (final ColumnFamilyHandle cfHandle : cfHandles) {
+ cfHandle.close();
+ }
+ cfHandles.clear();
+ }
+ } catch (final RocksDBException e) {
+ if (option != WalProcessingOption.CORRUPTED_RECORD) {
+ // an exception is only expected for CORRUPTED_RECORD; rethrow otherwise
+ throw e;
+ }
+ }
+ }
+ }
+ }
+ }
+
+
+ private static class TestableWalFilter extends AbstractWalFilter {
+ private final WalProcessingOption walProcessingOption;
+ private final int applyOptionForRecordIndex;
+ Map<Integer, Long> cfLognumber;
+ Map<String, Integer> cfNameId;
+ final List<Long> logNumbers = new ArrayList<>();
+ final List<String> logFileNames = new ArrayList<>();
+ private int currentRecordIndex = 0;
+
+ public TestableWalFilter(final WalProcessingOption walProcessingOption,
+ final int applyOptionForRecordIndex) {
+ super();
+ this.walProcessingOption = walProcessingOption;
+ this.applyOptionForRecordIndex = applyOptionForRecordIndex;
+ }
+
+ @Override
+ public void columnFamilyLogNumberMap(final Map<Integer, Long> cfLognumber,
+ final Map<String, Integer> cfNameId) {
+ this.cfLognumber = cfLognumber;
+ this.cfNameId = cfNameId;
+ }
+
+ @Override
+ public LogRecordFoundResult logRecordFound(
+ final long logNumber, final String logFileName, final WriteBatch batch,
+ final WriteBatch newBatch) {
+
+ logNumbers.add(logNumber);
+ logFileNames.add(logFileName);
+
+ final WalProcessingOption optionToReturn;
+ if (currentRecordIndex == applyOptionForRecordIndex) {
+ optionToReturn = walProcessingOption;
+ }
+ else {
+ optionToReturn = WalProcessingOption.CONTINUE_PROCESSING;
+ }
+
+ currentRecordIndex++;
+
+ return new LogRecordFoundResult(optionToReturn, false);
+ }
+
+ @Override
+ public String name() {
+ return "testable-wal-filter";
+ }
+ }
+}
assertThat(writeOptions.noSlowdown()).isTrue();
writeOptions.setNoSlowdown(false);
assertThat(writeOptions.noSlowdown()).isFalse();
+
+ writeOptions.setLowPri(true);
+ assertThat(writeOptions.lowPri()).isTrue();
+ writeOptions.setLowPri(false);
+ assertThat(writeOptions.lowPri()).isFalse();
}
}
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb.util;
+
+import org.rocksdb.CompactionPriority;
+import org.rocksdb.Options;
+import org.rocksdb.WALRecoveryMode;
+
+import java.util.Random;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+/**
+ * General test utilities.
+ */
+public class TestUtil {
+
+ /**
+ * Get the options for log iteration tests.
+ *
+ * @return the options
+ */
+ public static Options optionsForLogIterTest() {
+ return defaultOptions()
+ .setCreateIfMissing(true)
+ .setWalTtlSeconds(1000);
+ }
+
+ /**
+ * Get the default options.
+ *
+ * @return the options
+ */
+ public static Options defaultOptions() {
+ return new Options()
+ .setWriteBufferSize(4090 * 4096)
+ .setTargetFileSizeBase(2 * 1024 * 1024)
+ .setMaxBytesForLevelBase(10 * 1024 * 1024)
+ .setMaxOpenFiles(5000)
+ .setWalRecoveryMode(WALRecoveryMode.TolerateCorruptedTailRecords)
+ .setCompactionPriority(CompactionPriority.ByCompensatedSize);
+ }
+
+ private static final Random random = new Random();
+
+  /**
+   * Generate a random array of bytes.
+   *
+   * @param len the number of bytes to generate.
+   *
+   * @return the random byte array
+   */
+ public static byte[] dummyString(final int len) {
+ final byte[] str = new byte[len];
+ random.nextBytes(str);
+ return str;
+ }
+
+  /**
+   * Encode a String to its UTF-8 byte representation.
+   *
+   * @param str the string to encode
+   *
+   * @return the UTF-8 encoded bytes of the string.
+   */
+ public static byte[] u(final String str) {
+ return str.getBytes(UTF_8);
+ }
+}
java/rocksjni/checkpoint.cc \
java/rocksjni/clock_cache.cc \
java/rocksjni/columnfamilyhandle.cc \
+ java/rocksjni/compact_range_options.cc \
java/rocksjni/compaction_filter.cc \
java/rocksjni/compaction_filter_factory.cc \
java/rocksjni/compaction_filter_factory_jnicallback.cc \
- java/rocksjni/compact_range_options.cc \
+ java/rocksjni/compaction_job_info.cc \
+ java/rocksjni/compaction_job_stats.cc \
+ java/rocksjni/compaction_options.cc \
java/rocksjni/compaction_options_fifo.cc \
java/rocksjni/compaction_options_universal.cc \
java/rocksjni/comparator.cc \
java/rocksjni/optimistic_transaction_options.cc \
java/rocksjni/options.cc \
java/rocksjni/options_util.cc \
+ java/rocksjni/persistent_cache.cc \
java/rocksjni/ratelimiterjni.cc \
java/rocksjni/remove_emptyvalue_compactionfilterjni.cc \
java/rocksjni/cassandra_compactionfilterjni.cc \
java/rocksjni/statistics.cc \
java/rocksjni/statisticsjni.cc \
java/rocksjni/table.cc \
+ java/rocksjni/table_filter.cc \
+ java/rocksjni/table_filter_jnicallback.cc \
+ java/rocksjni/thread_status.cc \
+ java/rocksjni/trace_writer.cc \
+ java/rocksjni/trace_writer_jnicallback.cc \
java/rocksjni/transaction.cc \
java/rocksjni/transaction_db.cc \
java/rocksjni/transaction_options.cc \
java/rocksjni/transaction_notifier.cc \
java/rocksjni/transaction_notifier_jnicallback.cc \
java/rocksjni/ttl.cc \
+ java/rocksjni/wal_filter.cc \
+ java/rocksjni/wal_filter_jnicallback.cc \
java/rocksjni/write_batch.cc \
java/rocksjni/writebatchhandlerjnicallback.cc \
java/rocksjni/write_batch_test.cc \