/**
* ColumnFamilyOptions to control the behavior of a database. It will be used
* during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()).
- *
- * As a descendent of {@link AbstractNativeReference}, this class is {@link AutoCloseable}
+ * <p>
+ * As a descendant of {@link AbstractNativeReference}, this class is {@link AutoCloseable}
* and will be automatically released if opened in the preamble of a try with resources block.
*/
public class ColumnFamilyOptions extends RocksObject
/**
* Construct ColumnFamilyOptions.
- *
+ * <p>
* This constructor will create (by allocating a block of memory)
* an {@code rocksdb::ColumnFamilyOptions} in the c++ side.
*/
/**
* Copy constructor for ColumnFamilyOptions.
- *
+ * <p>
* NOTE: This does a shallow copy, which means comparator, merge_operator, compaction_filter,
* compaction_filter_factory and other pointers will be cloned!
*
* @param other The ColumnFamilyOptions to copy.
*/
- public ColumnFamilyOptions(ColumnFamilyOptions other) {
+ public ColumnFamilyOptions(final ColumnFamilyOptions other) {
super(copyColumnFamilyOptions(other.nativeHandle_));
this.memTableConfig_ = other.memTableConfig_;
this.tableFormatConfig_ = other.tableFormatConfig_;
}
@Override
- public ColumnFamilyOptions setBloomLocality(int bloomLocality) {
+ public ColumnFamilyOptions setBloomLocality(final int bloomLocality) {
setBloomLocality(nativeHandle_, bloomLocality);
return this;
}
}
@Override
- public ColumnFamilyOptions
- setMemtableHugePageSize(
- long memtableHugePageSize) {
+ public ColumnFamilyOptions setMemtableHugePageSize(final long memtableHugePageSize) {
setMemtableHugePageSize(nativeHandle_,
memtableHugePageSize);
return this;
}
@Override
- public ColumnFamilyOptions setSoftPendingCompactionBytesLimit(long softPendingCompactionBytesLimit) {
+ public ColumnFamilyOptions setSoftPendingCompactionBytesLimit(
+ final long softPendingCompactionBytesLimit) {
setSoftPendingCompactionBytesLimit(nativeHandle_,
softPendingCompactionBytesLimit);
return this;
}
@Override
- public ColumnFamilyOptions setHardPendingCompactionBytesLimit(long hardPendingCompactionBytesLimit) {
+ public ColumnFamilyOptions setHardPendingCompactionBytesLimit(
+ final long hardPendingCompactionBytesLimit) {
setHardPendingCompactionBytesLimit(nativeHandle_, hardPendingCompactionBytesLimit);
return this;
}
}
@Override
- public ColumnFamilyOptions setLevel0FileNumCompactionTrigger(int level0FileNumCompactionTrigger) {
+ public ColumnFamilyOptions setLevel0FileNumCompactionTrigger(
+ final int level0FileNumCompactionTrigger) {
setLevel0FileNumCompactionTrigger(nativeHandle_, level0FileNumCompactionTrigger);
return this;
}
}
@Override
- public ColumnFamilyOptions setLevel0SlowdownWritesTrigger(int level0SlowdownWritesTrigger) {
+ public ColumnFamilyOptions setLevel0SlowdownWritesTrigger(final int level0SlowdownWritesTrigger) {
setLevel0SlowdownWritesTrigger(nativeHandle_, level0SlowdownWritesTrigger);
return this;
}
}
@Override
- public ColumnFamilyOptions setLevel0StopWritesTrigger(int level0StopWritesTrigger) {
+ public ColumnFamilyOptions setLevel0StopWritesTrigger(final int level0StopWritesTrigger) {
setLevel0StopWritesTrigger(nativeHandle_, level0StopWritesTrigger);
return this;
}
}
@Override
- public ColumnFamilyOptions setMaxBytesForLevelMultiplierAdditional(int[] maxBytesForLevelMultiplierAdditional) {
+ public ColumnFamilyOptions setMaxBytesForLevelMultiplierAdditional(
+ final int[] maxBytesForLevelMultiplierAdditional) {
setMaxBytesForLevelMultiplierAdditional(nativeHandle_, maxBytesForLevelMultiplierAdditional);
return this;
}
}
@Override
- public ColumnFamilyOptions setParanoidFileChecks(boolean paranoidFileChecks) {
+ public ColumnFamilyOptions setParanoidFileChecks(final boolean paranoidFileChecks) {
setParanoidFileChecks(nativeHandle_, paranoidFileChecks);
return this;
}
}
@Override
- public ColumnFamilyOptions setSstPartitionerFactory(SstPartitionerFactory sstPartitionerFactory) {
+ public ColumnFamilyOptions setSstPartitionerFactory(
+ final SstPartitionerFactory sstPartitionerFactory) {
setSstPartitionerFactory(nativeHandle_, sstPartitionerFactory.nativeHandle_);
this.sstPartitionerFactory_ = sstPartitionerFactory;
return this;
* for reads. See also the options min_blob_size, blob_file_size,
* blob_compression_type, enable_blob_garbage_collection, and
* blob_garbage_collection_age_cutoff below.
- *
+ * <p>
* Default: false
- *
+ * <p>
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
* for reads. See also the options min_blob_size, blob_file_size,
* blob_compression_type, enable_blob_garbage_collection, and
* blob_garbage_collection_age_cutoff below.
- *
+ * <p>
* Default: false
- *
+ * <p>
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
* alongside the keys in SST files in the usual fashion. A value of zero for
* this option means that all values are stored in blob files. Note that
* enable_blob_files has to be set in order for this option to have any effect.
- *
+ * <p>
* Default: 0
- *
+ * <p>
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
* alongside the keys in SST files in the usual fashion. A value of zero for
* this option means that all values are stored in blob files. Note that
* enable_blob_files has to be set in order for this option to have any effect.
- *
+ * <p>
* Default: 0
- *
+ * <p>
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
* Set the size limit for blob files. When writing blob files, a new file is opened
* once this limit is reached. Note that enable_blob_files has to be set in
* order for this option to have any effect.
- *
+ * <p>
* Default: 256 MB
- *
+ * <p>
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
* Get the size limit for blob files. When writing blob files, a new file is opened
* once this limit is reached. Note that enable_blob_files has to be set in
* order for this option to have any effect.
- *
+ * <p>
* Default: 256 MB
- *
+ * <p>
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
* Set the compression algorithm to use for large values stored in blob files. Note
* that enable_blob_files has to be set in order for this option to have any
* effect.
- *
+ * <p>
* Default: no compression
- *
+ * <p>
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
* Get the compression algorithm to use for large values stored in blob files. Note
* that enable_blob_files has to be set in order for this option to have any
* effect.
- *
+ * <p>
* Default: no compression
- *
+ * <p>
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
* relocated to new files as they are encountered during compaction, which makes
* it possible to clean up blob files once they contain nothing but
* obsolete/garbage blobs. See also blob_garbage_collection_age_cutoff below.
- *
+ * <p>
* Default: false
*
* @param enableBlobGarbageCollection true iff blob garbage collection is to be enabled
* relocated to new files as they are encountered during compaction, which makes
* it possible to clean up blob files once they contain nothing but
* obsolete/garbage blobs. See also blob_garbage_collection_age_cutoff below.
- *
+ * <p>
* Default: false
*
* @return true iff blob garbage collection is currently enabled
* where N = garbage_collection_cutoff * number_of_blob_files. Note that
* enable_blob_garbage_collection has to be set in order for this option to have
* any effect.
- *
+ * <p>
* Default: 0.25
*
* @param blobGarbageCollectionAgeCutoff the new blob garbage collection age cutoff
* where N = garbage_collection_cutoff * number_of_blob_files. Note that
* enable_blob_garbage_collection has to be set in order for this option to have
* any effect.
- *
+ * <p>
* Default: 0.25
*
* @return the current blob garbage collection age cutoff
* the blob files in question, assuming they are all eligible based on the
* value of {@link #blobGarbageCollectionAgeCutoff} above. This option is
* currently only supported with leveled compactions.
- *
+ * <p>
* Note that {@link #enableBlobGarbageCollection} has to be set in order for this
* option to have any effect.
- *
+ * <p>
* Default: 1.0
- *
+ * <p>
* Dynamically changeable through the SetOptions() API
*
* @param blobGarbageCollectionForceThreshold new value for the threshold
/**
* Set compaction readahead for blob files.
- *
+ * <p>
* Default: 0
- *
+ * <p>
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
/**
* Set a certain LSM tree level to enable blob files.
- *
+ * <p>
* Default: 0
- *
+ * <p>
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
/**
* Get the starting LSM tree level to enable blob files.
- *
+ * <p>
* Default: 0
*
* @return the current LSM tree level to enable blob files.
/**
* Set a certain prepopulate blob cache option.
- *
+ * <p>
* Default: 0
- *
+ * <p>
* Dynamically changeable through
* {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
*
/**
* Get the prepopulate blob cache option.
- *
+ * <p>
* Default: 0
*
* @return the current prepopulate blob cache option.
--- /dev/null
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.charset.StandardCharsets;
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.rocksdb.util.ReverseBytewiseComparator;
+
+// Reproducer for an intermittent native-interop failure (presumably an
+// UnsupportedOperationException, per the class name -- the exact failure mode
+// is not visible from this file alone; see the repeat loop in
+// unsupportedOperation() below). Uses a reverse-bytewise comparator so the
+// Java comparator bridge is exercised on every iteration/write.
+public class ByteBufferUnsupportedOperationTest {
+ @ClassRule
+ public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+ new RocksNativeLibraryResource();
+
+ @Rule public TemporaryFolder dbFolder = new TemporaryFolder();
+
+ // Small facade over one RocksDB instance plus one column family per stream UUID.
+ public static class Handler {
+ private final RocksDB database;
+ private final Map<UUID, ColumnFamilyHandle> columnFamilies;
+
+ // Destroys any existing database at 'path', then opens a fresh one.
+ // Ownership of 'options' stays with the caller (it is not closed here).
+ public Handler(final String path, final Options options) throws RocksDBException {
+ RocksDB.destroyDB(path, options);
+ this.database = RocksDB.open(options, path);
+ this.columnFamilies = new ConcurrentHashMap<>();
+ }
+
+ // Creates a column family named after 'streamID', ordered by a Java
+ // ReverseBytewiseComparator, and registers its handle for later lookups.
+ // NOTE(review): 'tableOptions' is never closed, and 'comparatorOptions' is
+ // closed by the try-with-resources while the comparator built from it is
+ // still registered with the column family -- confirm both are safe for the
+ // native-resource lifecycle (or intentional for this reproducer).
+ public void addTable(final UUID streamID) throws RocksDBException {
+ final ColumnFamilyOptions tableOptions = new ColumnFamilyOptions();
+ tableOptions.optimizeUniversalStyleCompaction();
+ try (final ComparatorOptions comparatorOptions = new ComparatorOptions()) {
+ // comparatorOptions.setReusedSynchronisationType(ReusedSynchronisationType.ADAPTIVE_MUTEX);
+ tableOptions.setComparator(new ReverseBytewiseComparator(comparatorOptions));
+ final ColumnFamilyDescriptor tableDescriptor = new ColumnFamilyDescriptor(
+ streamID.toString().getBytes(StandardCharsets.UTF_8), tableOptions);
+ final ColumnFamilyHandle tableHandle = database.createColumnFamily(tableDescriptor);
+ columnFamilies.put(streamID, tableHandle);
+ }
+ }
+
+ // Writes all key/value pairs into the stream's column family atomically,
+ // via a single WriteBatch. Batch and write options are closed on exit.
+ public void updateAll(final List<byte[][]> keyValuePairs, final UUID streamID)
+ throws RocksDBException {
+ final ColumnFamilyHandle currTable = columnFamilies.get(streamID);
+ try (final WriteBatch batchedWrite = new WriteBatch();
+ final WriteOptions writeOptions = new WriteOptions()) {
+ for (final byte[][] pair : keyValuePairs) {
+ final byte[] keyBytes = pair[0];
+ final byte[] valueBytes = pair[1];
+ batchedWrite.put(currTable, keyBytes, valueBytes);
+ }
+ database.write(writeOptions, batchedWrite);
+ }
+ }
+ // Linear scan of the stream's column family; true iff some entry's value
+ // equals 'encodedValue' byte-for-byte. O(n) by design -- it forces a full
+ // iteration through the comparator-ordered data.
+ public boolean containsValue(final byte[] encodedValue, final UUID streamID) {
+ try (final RocksIterator iter = database.newIterator(columnFamilies.get(streamID))) {
+ iter.seekToFirst();
+ while (iter.isValid()) {
+ final byte[] val = iter.value();
+ if (Arrays.equals(val, encodedValue)) {
+ return true;
+ }
+ iter.next();
+ }
+ }
+ return false;
+ }
+
+ // Closes all column family handles before the database, per RocksJava's
+ // required disposal order.
+ public void close() {
+ for (final ColumnFamilyHandle handle : columnFamilies.values()) {
+ handle.close();
+ }
+ database.close();
+ }
+ }
+
+ // One round of the reproducer: write 'numRepeats' entries into a fresh
+ // column family, then verify every value is found by a full scan.
+ // NOTE(review): 'opts' is never closed, and 'handler.close()' is skipped if
+ // anything above it throws -- acceptable for a crash reproducer, but a
+ // try-with-resources / try-finally would avoid leaking native handles.
+ private void inner(final int numRepeats) throws RocksDBException {
+ final Options opts = new Options();
+ opts.setCreateIfMissing(true);
+ final Handler handler = new Handler("testDB", opts);
+ final UUID stream1 = UUID.randomUUID();
+
+ final List<byte[][]> entries = new ArrayList<>();
+ for (int i = 0; i < numRepeats; i++) {
+ final byte[] value = value(i);
+ final byte[] key = key(i);
+ entries.add(new byte[][] {key, value});
+ }
+ handler.addTable(stream1);
+ handler.updateAll(entries, stream1);
+
+ for (int i = 0; i < numRepeats; i++) {
+ final byte[] val = value(i);
+ final boolean hasValue = handler.containsValue(val, stream1);
+ if (!hasValue) {
+ throw new IllegalStateException("not has value " + i);
+ }
+ }
+
+ handler.close();
+ }
+
+ // Deterministic UTF-8 key for index i ("key0", "key1", ...).
+ private static byte[] key(final int i) {
+ return ("key" + i).getBytes(StandardCharsets.UTF_8);
+ }
+
+ // Deterministic UTF-8 value for index i ("value0", "value1", ...).
+ private static byte[] value(final int i) {
+ return ("value" + i).getBytes(StandardCharsets.UTF_8);
+ }
+
+ @Test
+ public void unsupportedOperation() throws RocksDBException {
+ final int numRepeats = 1000;
+ final int repeatTest = 10;
+
+ // the error is not always reproducible... let's try to increase the odds by repeating the main
+ // test body
+ for (int i = 0; i < repeatTest; i++) {
+ try {
+ inner(numRepeats);
+ } catch (final RuntimeException runtimeException) {
+ System.out.println("Exception on repeat " + i);
+ throw runtimeException;
+ }
+ }
+ }
+}