Merge pull request #450 from adamretter/writebatch-with-index
Add WriteBatchWithIndex to the Java API
commit
2355931c6f
@ -0,0 +1,105 @@ |
|||||||
|
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
|
||||||
|
// This source code is licensed under the BSD-style license found in the
|
||||||
|
// LICENSE file in the root directory of this source tree. An additional grant
|
||||||
|
// of patent rights can be found in the PATENTS file in the same directory.
|
||||||
|
|
||||||
|
package org.rocksdb; |
||||||
|
|
||||||
|
/** |
||||||
|
* Base class implementation for Rocks Iterators |
||||||
|
* in the Java API |
||||||
|
* <p/> |
||||||
|
* <p>Multiple threads can invoke const methods on an RocksIterator without |
||||||
|
* external synchronization, but if any of the threads may call a |
||||||
|
* non-const method, all threads accessing the same RocksIterator must use |
||||||
|
* external synchronization.</p> |
||||||
|
* |
||||||
|
* @param P The type of the Parent Object from which the Rocks Iterator was |
||||||
|
* created. This is used by disposeInternal to avoid double-free |
||||||
|
* issues with the underlying C++ object. |
||||||
|
* @see org.rocksdb.RocksObject |
||||||
|
*/ |
||||||
|
public abstract class AbstractRocksIterator<P extends RocksObject> |
||||||
|
extends RocksObject implements RocksIteratorInterface { |
||||||
|
final P parent_; |
||||||
|
|
||||||
|
protected AbstractRocksIterator(P parent, long nativeHandle) { |
||||||
|
super(); |
||||||
|
nativeHandle_ = nativeHandle; |
||||||
|
// parent must point to a valid RocksDB instance.
|
||||||
|
assert (parent != null); |
||||||
|
// RocksIterator must hold a reference to the related parent instance
|
||||||
|
// to guarantee that while a GC cycle starts RocksIterator instances
|
||||||
|
// are freed prior to parent instances.
|
||||||
|
parent_ = parent; |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public boolean isValid() { |
||||||
|
assert (isInitialized()); |
||||||
|
return isValid0(nativeHandle_); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public void seekToFirst() { |
||||||
|
assert (isInitialized()); |
||||||
|
seekToFirst0(nativeHandle_); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public void seekToLast() { |
||||||
|
assert (isInitialized()); |
||||||
|
seekToLast0(nativeHandle_); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public void seek(byte[] target) { |
||||||
|
assert (isInitialized()); |
||||||
|
seek0(nativeHandle_, target, target.length); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public void next() { |
||||||
|
assert (isInitialized()); |
||||||
|
next0(nativeHandle_); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public void prev() { |
||||||
|
assert (isInitialized()); |
||||||
|
prev0(nativeHandle_); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public void status() throws RocksDBException { |
||||||
|
assert (isInitialized()); |
||||||
|
status0(nativeHandle_); |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* <p>Deletes underlying C++ iterator pointer.</p> |
||||||
|
* <p/> |
||||||
|
* <p>Note: the underlying handle can only be safely deleted if the parent |
||||||
|
* instance related to a certain RocksIterator is still valid and initialized. |
||||||
|
* Therefore {@code disposeInternal()} checks if the parent is initialized |
||||||
|
* before freeing the native handle.</p> |
||||||
|
*/ |
||||||
|
@Override |
||||||
|
protected void disposeInternal() { |
||||||
|
synchronized (parent_) { |
||||||
|
assert (isInitialized()); |
||||||
|
if (parent_.isInitialized()) { |
||||||
|
disposeInternal(nativeHandle_); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
abstract void disposeInternal(long handle); |
||||||
|
abstract boolean isValid0(long handle); |
||||||
|
abstract void seekToFirst0(long handle); |
||||||
|
abstract void seekToLast0(long handle); |
||||||
|
abstract void next0(long handle); |
||||||
|
abstract void prev0(long handle); |
||||||
|
abstract void seek0(long handle, byte[] target, int targetLen); |
||||||
|
abstract void status0(long handle) throws RocksDBException; |
||||||
|
} |
@ -0,0 +1,92 @@ |
|||||||
|
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
|
||||||
|
// This source code is licensed under the BSD-style license found in the
|
||||||
|
// LICENSE file in the root directory of this source tree. An additional grant
|
||||||
|
// of patent rights can be found in the PATENTS file in the same directory.
|
||||||
|
|
||||||
|
package org.rocksdb; |
||||||
|
|
||||||
|
public abstract class AbstractWriteBatch extends RocksObject implements WriteBatchInterface { |
||||||
|
|
||||||
|
@Override |
||||||
|
public int count() { |
||||||
|
assert (isInitialized()); |
||||||
|
return count0(); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public void put(byte[] key, byte[] value) { |
||||||
|
assert (isInitialized()); |
||||||
|
put(key, key.length, value, value.length); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public void put(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) { |
||||||
|
assert (isInitialized()); |
||||||
|
put(key, key.length, value, value.length, columnFamilyHandle.nativeHandle_); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public void merge(byte[] key, byte[] value) { |
||||||
|
assert (isInitialized()); |
||||||
|
merge(key, key.length, value, value.length); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) { |
||||||
|
assert (isInitialized()); |
||||||
|
merge(key, key.length, value, value.length, columnFamilyHandle.nativeHandle_); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public void remove(byte[] key) { |
||||||
|
assert (isInitialized()); |
||||||
|
remove(key, key.length); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public void remove(ColumnFamilyHandle columnFamilyHandle, byte[] key) { |
||||||
|
assert (isInitialized()); |
||||||
|
remove(key, key.length, columnFamilyHandle.nativeHandle_); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public void putLogData(byte[] blob) { |
||||||
|
assert (isInitialized()); |
||||||
|
putLogData(blob, blob.length); |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public void clear() { |
||||||
|
assert (isInitialized()); |
||||||
|
clear0(); |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Delete the c++ side pointer. |
||||||
|
*/ |
||||||
|
@Override |
||||||
|
protected void disposeInternal() { |
||||||
|
assert (isInitialized()); |
||||||
|
disposeInternal(nativeHandle_); |
||||||
|
} |
||||||
|
|
||||||
|
abstract void disposeInternal(long handle); |
||||||
|
|
||||||
|
abstract int count0(); |
||||||
|
|
||||||
|
abstract void put(byte[] key, int keyLen, byte[] value, int valueLen); |
||||||
|
|
||||||
|
abstract void put(byte[] key, int keyLen, byte[] value, int valueLen, long cfHandle); |
||||||
|
|
||||||
|
abstract void merge(byte[] key, int keyLen, byte[] value, int valueLen); |
||||||
|
|
||||||
|
abstract void merge(byte[] key, int keyLen, byte[] value, int valueLen, long cfHandle); |
||||||
|
|
||||||
|
abstract void remove(byte[] key, int keyLen); |
||||||
|
|
||||||
|
abstract void remove(byte[] key, int keyLen, long cfHandle); |
||||||
|
|
||||||
|
abstract void putLogData(byte[] blob, int blobLen); |
||||||
|
|
||||||
|
abstract void clear0(); |
||||||
|
} |
@ -0,0 +1,80 @@ |
|||||||
|
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
|
||||||
|
// This source code is licensed under the BSD-style license found in the
|
||||||
|
// LICENSE file in the root directory of this source tree. An additional grant
|
||||||
|
// of patent rights can be found in the PATENTS file in the same directory.
|
||||||
|
|
||||||
|
package org.rocksdb; |
||||||
|
|
||||||
|
/** |
||||||
|
* <p>Defines the interface for an Iterator which provides |
||||||
|
* access to data one entry at a time. Multiple implementations |
||||||
|
* are provided by this library. In particular, iterators are provided |
||||||
|
* to access the contents of a DB and Write Batch.</p> |
||||||
|
* <p/> |
||||||
|
* <p>Multiple threads can invoke const methods on an RocksIterator without |
||||||
|
* external synchronization, but if any of the threads may call a |
||||||
|
* non-const method, all threads accessing the same RocksIterator must use |
||||||
|
* external synchronization.</p> |
||||||
|
* |
||||||
|
* @see org.rocksdb.RocksObject |
||||||
|
*/ |
||||||
|
public interface RocksIteratorInterface { |
||||||
|
|
||||||
|
/** |
||||||
|
* <p>An iterator is either positioned at an entry, or |
||||||
|
* not valid. This method returns true if the iterator is valid.</p> |
||||||
|
* |
||||||
|
* @return true if iterator is valid. |
||||||
|
*/ |
||||||
|
public boolean isValid(); |
||||||
|
|
||||||
|
/** |
||||||
|
* <p>Position at the first entry in the source. The iterator is Valid() |
||||||
|
* after this call if the source is not empty.</p> |
||||||
|
*/ |
||||||
|
public void seekToFirst(); |
||||||
|
|
||||||
|
/** |
||||||
|
* <p>Position at the last entry in the source. The iterator is |
||||||
|
* valid after this call if the source is not empty.</p> |
||||||
|
*/ |
||||||
|
public void seekToLast(); |
||||||
|
|
||||||
|
/** |
||||||
|
* <p>Position at the first entry in the source whose key is that or |
||||||
|
* past target.</p> |
||||||
|
* <p/> |
||||||
|
* <p>The iterator is valid after this call if the source contains |
||||||
|
* a key that comes at or past target.</p> |
||||||
|
* |
||||||
|
* @param target byte array describing a key or a |
||||||
|
* key prefix to seek for. |
||||||
|
*/ |
||||||
|
public void seek(byte[] target); |
||||||
|
|
||||||
|
/** |
||||||
|
* <p>Moves to the next entry in the source. After this call, Valid() is |
||||||
|
* true if the iterator was not positioned at the last entry in the source.</p> |
||||||
|
* <p/> |
||||||
|
* <p>REQUIRES: {@link #isValid()}</p> |
||||||
|
*/ |
||||||
|
public void next(); |
||||||
|
|
||||||
|
/** |
||||||
|
* <p>Moves to the previous entry in the source. After this call, Valid() is |
||||||
|
* true if the iterator was not positioned at the first entry in source.</p> |
||||||
|
* <p/> |
||||||
|
* <p>REQUIRES: {@link #isValid()}</p> |
||||||
|
*/ |
||||||
|
public void prev(); |
||||||
|
|
||||||
|
/** |
||||||
|
* <pIf an error has occurred, return it. Else return an ok status. |
||||||
|
* If non-blocking IO is requested and this operation cannot be |
||||||
|
* satisfied without doing some IO, then this returns Status::Incomplete().</p> |
||||||
|
* |
||||||
|
* @throws RocksDBException thrown if error happens in underlying |
||||||
|
* native library. |
||||||
|
*/ |
||||||
|
public void status() throws RocksDBException; |
||||||
|
} |
@ -0,0 +1,137 @@ |
|||||||
|
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
|
||||||
|
// This source code is licensed under the BSD-style license found in the
|
||||||
|
// LICENSE file in the root directory of this source tree. An additional grant
|
||||||
|
// of patent rights can be found in the PATENTS file in the same directory.
|
||||||
|
|
||||||
|
package org.rocksdb; |
||||||
|
|
||||||
|
public class WBWIRocksIterator extends AbstractRocksIterator<WriteBatchWithIndex> { |
||||||
|
private final WriteEntry entry = new WriteEntry(); |
||||||
|
|
||||||
|
protected WBWIRocksIterator(WriteBatchWithIndex wbwi, long nativeHandle) { |
||||||
|
super(wbwi, nativeHandle); |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Get the current entry |
||||||
|
* |
||||||
|
* The WriteEntry is only valid |
||||||
|
* until the iterator is repositioned. |
||||||
|
* If you want to keep the WriteEntry across iterator |
||||||
|
* movements, you must make a copy of its data! |
||||||
|
* |
||||||
|
* @return The WriteEntry of the current entry |
||||||
|
*/ |
||||||
|
public WriteEntry entry() { |
||||||
|
assert(isInitialized()); |
||||||
|
assert(entry != null); |
||||||
|
entry1(nativeHandle_, entry); |
||||||
|
return entry; |
||||||
|
} |
||||||
|
|
||||||
|
@Override final native void disposeInternal(long handle); |
||||||
|
@Override final native boolean isValid0(long handle); |
||||||
|
@Override final native void seekToFirst0(long handle); |
||||||
|
@Override final native void seekToLast0(long handle); |
||||||
|
@Override final native void next0(long handle); |
||||||
|
@Override final native void prev0(long handle); |
||||||
|
@Override final native void seek0(long handle, byte[] target, int targetLen); |
||||||
|
@Override final native void status0(long handle) throws RocksDBException; |
||||||
|
|
||||||
|
private native void entry1(long handle, WriteEntry entry); |
||||||
|
|
||||||
|
/** |
||||||
|
* Enumeration of the Write operation |
||||||
|
* that created the record in the Write Batch |
||||||
|
*/ |
||||||
|
public enum WriteType { |
||||||
|
PUT, |
||||||
|
MERGE, |
||||||
|
DELETE, |
||||||
|
LOG |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Represents an entry returned by |
||||||
|
* {@link org.rocksdb.WBWIRocksIterator#entry()} |
||||||
|
* |
||||||
|
* It is worth noting that a WriteEntry with |
||||||
|
* the type {@link org.rocksdb.WBWIRocksIterator.WriteType#DELETE} |
||||||
|
* or {@link org.rocksdb.WBWIRocksIterator.WriteType#LOG} |
||||||
|
* will not have a value. |
||||||
|
*/ |
||||||
|
public static class WriteEntry { |
||||||
|
WriteType type = null; |
||||||
|
final DirectSlice key; |
||||||
|
final DirectSlice value; |
||||||
|
|
||||||
|
/** |
||||||
|
* Intentionally private as this |
||||||
|
* should only be instantiated in |
||||||
|
* this manner by the outer WBWIRocksIterator |
||||||
|
* class; The class members are then modified |
||||||
|
* by calling {@link org.rocksdb.WBWIRocksIterator#entry()} |
||||||
|
*/ |
||||||
|
private WriteEntry() { |
||||||
|
key = new DirectSlice(); |
||||||
|
value = new DirectSlice(); |
||||||
|
} |
||||||
|
|
||||||
|
public WriteEntry(WriteType type, DirectSlice key, DirectSlice value) { |
||||||
|
this.type = type; |
||||||
|
this.key = key; |
||||||
|
this.value = value; |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Returns the type of the Write Entry |
||||||
|
* |
||||||
|
* @return the WriteType of the WriteEntry |
||||||
|
*/ |
||||||
|
public WriteType getType() { |
||||||
|
return type; |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Returns the key of the Write Entry |
||||||
|
* |
||||||
|
* @return The slice containing the key |
||||||
|
* of the WriteEntry |
||||||
|
*/ |
||||||
|
public DirectSlice getKey() { |
||||||
|
return key; |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Returns the value of the Write Entry |
||||||
|
* |
||||||
|
* @return The slice containing the value of |
||||||
|
* the WriteEntry or null if the WriteEntry has |
||||||
|
* no value |
||||||
|
*/ |
||||||
|
public DirectSlice getValue() { |
||||||
|
if(!value.isInitialized()) { |
||||||
|
return null; //TODO(AR) migrate to JDK8 java.util.Optional#empty()
|
||||||
|
} else { |
||||||
|
return value; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
@Override |
||||||
|
public boolean equals(Object other) { |
||||||
|
if(other == null) { |
||||||
|
return false; |
||||||
|
} else if (this == other) { |
||||||
|
return true; |
||||||
|
} else if(other instanceof WriteEntry) { |
||||||
|
final WriteEntry otherWriteEntry = (WriteEntry)other; |
||||||
|
return type.equals(otherWriteEntry.type) |
||||||
|
&& key.equals(otherWriteEntry.key) |
||||||
|
&& (value.isInitialized() ? value.equals(otherWriteEntry.value) |
||||||
|
: !otherWriteEntry.value.isInitialized()); |
||||||
|
} else { |
||||||
|
return false; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,98 @@ |
|||||||
|
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
|
||||||
|
// This source code is licensed under the BSD-style license found in the
|
||||||
|
// LICENSE file in the root directory of this source tree. An additional grant
|
||||||
|
// of patent rights can be found in the PATENTS file in the same directory.
|
||||||
|
|
||||||
|
package org.rocksdb; |
||||||
|
|
||||||
|
/** |
||||||
|
* <p>Defines the interface for a Write Batch which |
||||||
|
* holds a collection of updates to apply atomically to a DB.</p> |
||||||
|
*/ |
||||||
|
public interface WriteBatchInterface { |
||||||
|
|
||||||
|
/** |
||||||
|
* Returns the number of updates in the batch. |
||||||
|
* |
||||||
|
* @return number of items in WriteBatch |
||||||
|
*/ |
||||||
|
public int count(); |
||||||
|
|
||||||
|
/** |
||||||
|
* <p>Store the mapping "key->value" in the database.</p> |
||||||
|
* |
||||||
|
* @param key the specified key to be inserted. |
||||||
|
* @param value the value associated with the specified key. |
||||||
|
*/ |
||||||
|
public void put(byte[] key, byte[] value); |
||||||
|
|
||||||
|
/** |
||||||
|
* <p>Store the mapping "key->value" within given column |
||||||
|
* family.</p> |
||||||
|
* |
||||||
|
* @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} |
||||||
|
* instance |
||||||
|
* @param key the specified key to be inserted. |
||||||
|
* @param value the value associated with the specified key. |
||||||
|
*/ |
||||||
|
public void put(ColumnFamilyHandle columnFamilyHandle, |
||||||
|
byte[] key, byte[] value); |
||||||
|
|
||||||
|
/** |
||||||
|
* <p>Merge "value" with the existing value of "key" in the database. |
||||||
|
* "key->merge(existing, value)"</p> |
||||||
|
* |
||||||
|
* @param key the specified key to be merged. |
||||||
|
* @param value the value to be merged with the current value for |
||||||
|
* the specified key. |
||||||
|
*/ |
||||||
|
public void merge(byte[] key, byte[] value); |
||||||
|
|
||||||
|
/** |
||||||
|
* <p>Merge "value" with the existing value of "key" in given column family. |
||||||
|
* "key->merge(existing, value)"</p> |
||||||
|
* |
||||||
|
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance |
||||||
|
* @param key the specified key to be merged. |
||||||
|
* @param value the value to be merged with the current value for |
||||||
|
* the specified key. |
||||||
|
*/ |
||||||
|
public void merge(ColumnFamilyHandle columnFamilyHandle, |
||||||
|
byte[] key, byte[] value); |
||||||
|
|
||||||
|
/** |
||||||
|
* <p>If the database contains a mapping for "key", erase it. Else do nothing.</p> |
||||||
|
* |
||||||
|
* @param key Key to delete within database |
||||||
|
*/ |
||||||
|
public void remove(byte[] key); |
||||||
|
|
||||||
|
/** |
||||||
|
* <p>If column family contains a mapping for "key", erase it. Else do nothing.</p> |
||||||
|
* |
||||||
|
* @param columnFamilyHandle {@link ColumnFamilyHandle} instance |
||||||
|
* @param key Key to delete within database |
||||||
|
*/ |
||||||
|
public void remove(ColumnFamilyHandle columnFamilyHandle, byte[] key); |
||||||
|
|
||||||
|
/** |
||||||
|
* Append a blob of arbitrary size to the records in this batch. The blob will |
||||||
|
* be stored in the transaction log but not in any other file. In particular, |
||||||
|
* it will not be persisted to the SST files. When iterating over this |
||||||
|
* WriteBatch, WriteBatch::Handler::LogData will be called with the contents |
||||||
|
* of the blob as it is encountered. Blobs, puts, deletes, and merges will be |
||||||
|
* encountered in the same order in thich they were inserted. The blob will |
||||||
|
* NOT consume sequence number(s) and will NOT increase the count of the batch |
||||||
|
* |
||||||
|
* Example application: add timestamps to the transaction log for use in |
||||||
|
* replication. |
||||||
|
* |
||||||
|
* @param blob binary object to be inserted |
||||||
|
*/ |
||||||
|
public void putLogData(byte[] blob); |
||||||
|
|
||||||
|
/** |
||||||
|
* Clear all updates buffered in this batch |
||||||
|
*/ |
||||||
|
public void clear(); |
||||||
|
} |
@ -0,0 +1,149 @@ |
|||||||
|
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
|
||||||
|
// This source code is licensed under the BSD-style license found in the
|
||||||
|
// LICENSE file in the root directory of this source tree. An additional grant
|
||||||
|
// of patent rights can be found in the PATENTS file in the same directory.
|
||||||
|
|
||||||
|
package org.rocksdb; |
||||||
|
|
||||||
|
/** |
||||||
|
* Similar to {@link org.rocksdb.WriteBatch} but with a binary searchable |
||||||
|
* index built for all the keys inserted. |
||||||
|
* |
||||||
|
* Calling put, merge, remove or putLogData calls the same function |
||||||
|
* as with {@link org.rocksdb.WriteBatch} whilst also building an index. |
||||||
|
* |
||||||
|
* A user can call {@link org.rocksdb.WriteBatchWithIndex#newIterator() }to create an iterator |
||||||
|
* over the write batch or |
||||||
|
* {@link org.rocksdb.WriteBatchWithIndex#newIteratorWithBase(org.rocksdb.RocksIterator)} to |
||||||
|
* get an iterator for the database with Read-Your-Own-Writes like capability |
||||||
|
*/ |
||||||
|
public class WriteBatchWithIndex extends AbstractWriteBatch { |
||||||
|
/** |
||||||
|
* Creates a WriteBatchWithIndex where no bytes |
||||||
|
* are reserved up-front, bytewise comparison is |
||||||
|
* used for fallback key comparisons, |
||||||
|
* and duplicate keys operations are retained |
||||||
|
*/ |
||||||
|
public WriteBatchWithIndex() { |
||||||
|
super(); |
||||||
|
newWriteBatchWithIndex(); |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
/** |
||||||
|
* Creates a WriteBatchWithIndex where no bytes |
||||||
|
* are reserved up-front, bytewise comparison is |
||||||
|
* used for fallback key comparisons, and duplicate key |
||||||
|
* assignment is determined by the constructor argument |
||||||
|
* |
||||||
|
* @param overwriteKey if true, overwrite the key in the index when |
||||||
|
* inserting a duplicate key, in this way an iterator will never |
||||||
|
* show two entries with the same key. |
||||||
|
*/ |
||||||
|
public WriteBatchWithIndex(boolean overwriteKey) { |
||||||
|
super(); |
||||||
|
newWriteBatchWithIndex(overwriteKey); |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Creates a WriteBatchWithIndex |
||||||
|
* |
||||||
|
* @param fallbackIndexComparator We fallback to this comparator |
||||||
|
* to compare keys within a column family if we cannot determine |
||||||
|
* the column family and so look up it's comparator. |
||||||
|
* |
||||||
|
* @param reservedBytes reserved bytes in underlying WriteBatch |
||||||
|
* |
||||||
|
* @param overwriteKey if true, overwrite the key in the index when |
||||||
|
* inserting a duplicate key, in this way an iterator will never |
||||||
|
* show two entries with the same key. |
||||||
|
*/ |
||||||
|
public WriteBatchWithIndex(AbstractComparator fallbackIndexComparator, int reservedBytes, |
||||||
|
boolean overwriteKey) { |
||||||
|
super(); |
||||||
|
newWriteBatchWithIndex(fallbackIndexComparator.nativeHandle_, reservedBytes, overwriteKey); |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Create an iterator of a column family. User can call |
||||||
|
* {@link org.rocksdb.RocksIteratorInterface#seek(byte[])} to |
||||||
|
* search to the next entry of or after a key. Keys will be iterated in the |
||||||
|
* order given by index_comparator. For multiple updates on the same key, |
||||||
|
* each update will be returned as a separate entry, in the order of update |
||||||
|
* time. |
||||||
|
* |
||||||
|
* @param columnFamilyHandle The column family to iterate over |
||||||
|
* @return An iterator for the Write Batch contents, restricted to the column family |
||||||
|
*/ |
||||||
|
public WBWIRocksIterator newIterator(ColumnFamilyHandle columnFamilyHandle) { |
||||||
|
return new WBWIRocksIterator(this, iterator1(columnFamilyHandle.nativeHandle_)); |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Create an iterator of the default column family. User can call |
||||||
|
* {@link org.rocksdb.RocksIteratorInterface#seek(byte[])} to |
||||||
|
* search to the next entry of or after a key. Keys will be iterated in the |
||||||
|
* order given by index_comparator. For multiple updates on the same key, |
||||||
|
* each update will be returned as a separate entry, in the order of update |
||||||
|
* time. |
||||||
|
* |
||||||
|
* @return An iterator for the Write Batch contents |
||||||
|
*/ |
||||||
|
public WBWIRocksIterator newIterator() { |
||||||
|
return new WBWIRocksIterator(this, iterator0()); |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Provides Read-Your-Own-Writes like functionality by |
||||||
|
* creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator} |
||||||
|
* as a delta and baseIterator as a base |
||||||
|
* |
||||||
|
* @param columnFamilyHandle The column family to iterate over |
||||||
|
* @param baseIterator The base iterator, e.g. {@link org.rocksdb.RocksDB#newIterator()} |
||||||
|
* @return An iterator which shows a view comprised of both the database point-in-time |
||||||
|
* from baseIterator and modifications made in this write batch. |
||||||
|
*/ |
||||||
|
public RocksIterator newIteratorWithBase(ColumnFamilyHandle columnFamilyHandle, |
||||||
|
RocksIterator baseIterator) { |
||||||
|
RocksIterator iterator = new RocksIterator( |
||||||
|
baseIterator.parent_, |
||||||
|
iteratorWithBase(columnFamilyHandle.nativeHandle_, baseIterator.nativeHandle_)); |
||||||
|
//when the iterator is deleted it will also delete the baseIterator
|
||||||
|
baseIterator.disOwnNativeHandle(); |
||||||
|
return iterator; |
||||||
|
} |
||||||
|
|
||||||
|
/** |
||||||
|
* Provides Read-Your-Own-Writes like functionality by |
||||||
|
* creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator} |
||||||
|
* as a delta and baseIterator as a base. Operates on the default column family. |
||||||
|
* |
||||||
|
* @param baseIterator The base iterator, e.g. {@link org.rocksdb.RocksDB#newIterator()} |
||||||
|
* @return An iterator which shows a view comprised of both the database point-in-time |
||||||
|
* from baseIterator and modifications made in this write batch. |
||||||
|
*/ |
||||||
|
public RocksIterator newIteratorWithBase(RocksIterator baseIterator) { |
||||||
|
return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(), baseIterator); |
||||||
|
} |
||||||
|
|
||||||
|
@Override final native void disposeInternal(long handle); |
||||||
|
@Override final native int count0(); |
||||||
|
@Override final native void put(byte[] key, int keyLen, byte[] value, int valueLen); |
||||||
|
@Override final native void put(byte[] key, int keyLen, byte[] value, int valueLen, |
||||||
|
long cfHandle); |
||||||
|
@Override final native void merge(byte[] key, int keyLen, byte[] value, int valueLen); |
||||||
|
@Override final native void merge(byte[] key, int keyLen, byte[] value, int valueLen, |
||||||
|
long cfHandle); |
||||||
|
@Override final native void remove(byte[] key, int keyLen); |
||||||
|
@Override final native void remove(byte[] key, int keyLen, long cfHandle); |
||||||
|
@Override final native void putLogData(byte[] blob, int blobLen); |
||||||
|
@Override final native void clear0(); |
||||||
|
|
||||||
|
private native void newWriteBatchWithIndex(); |
||||||
|
private native void newWriteBatchWithIndex(boolean overwriteKey); |
||||||
|
private native void newWriteBatchWithIndex(long fallbackIndexComparatorHandle, int reservedBytes, |
||||||
|
boolean overwriteKey); |
||||||
|
private native long iterator0(); |
||||||
|
private native long iterator1(long cfHandle); |
||||||
|
private native long iteratorWithBase(long baseIteratorHandle, long cfHandle); |
||||||
|
} |
@ -0,0 +1,247 @@ |
|||||||
|
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
|
||||||
|
// This source code is licensed under the BSD-style license found in the
|
||||||
|
// LICENSE file in the root directory of this source tree. An additional grant
|
||||||
|
// of patent rights can be found in the PATENTS file in the same directory.
|
||||||
|
//
|
||||||
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||||
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||||
|
|
||||||
|
package org.rocksdb.test; |
||||||
|
|
||||||
|
import org.junit.ClassRule; |
||||||
|
import org.junit.Rule; |
||||||
|
import org.junit.Test; |
||||||
|
import org.junit.rules.TemporaryFolder; |
||||||
|
import org.rocksdb.WriteBatchWithIndex; |
||||||
|
import org.rocksdb.DirectSlice; |
||||||
|
import org.rocksdb.Options; |
||||||
|
import org.rocksdb.RocksDB; |
||||||
|
import org.rocksdb.RocksDBException; |
||||||
|
import org.rocksdb.RocksIterator; |
||||||
|
import org.rocksdb.WriteOptions; |
||||||
|
import org.rocksdb.WBWIRocksIterator; |
||||||
|
|
||||||
|
import java.nio.ByteBuffer; |
||||||
|
import java.util.ArrayDeque; |
||||||
|
import java.util.Deque; |
||||||
|
|
||||||
|
import static org.assertj.core.api.Assertions.assertThat; |
||||||
|
|
||||||
|
|
||||||
|
public class WriteBatchWithIndexTest { |
||||||
|
|
||||||
|
@ClassRule |
||||||
|
public static final RocksMemoryResource rocksMemoryResource = |
||||||
|
new RocksMemoryResource(); |
||||||
|
|
||||||
|
@Rule |
||||||
|
public TemporaryFolder dbFolder = new TemporaryFolder(); |
||||||
|
|
||||||
|
@Test |
||||||
|
public void readYourOwnWrites() throws RocksDBException { |
||||||
|
RocksDB db = null; |
||||||
|
Options options = null; |
||||||
|
try { |
||||||
|
options = new Options(); |
||||||
|
// Setup options
|
||||||
|
options.setCreateIfMissing(true); |
||||||
|
db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); |
||||||
|
|
||||||
|
final byte[] k1 = "key1".getBytes(); |
||||||
|
final byte[] v1 = "value1".getBytes(); |
||||||
|
final byte[] k2 = "key2".getBytes(); |
||||||
|
final byte[] v2 = "value2".getBytes(); |
||||||
|
|
||||||
|
db.put(k1, v1); |
||||||
|
db.put(k2, v2); |
||||||
|
|
||||||
|
final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true); |
||||||
|
|
||||||
|
RocksIterator base = null; |
||||||
|
RocksIterator it = null; |
||||||
|
try { |
||||||
|
base = db.newIterator(); |
||||||
|
it = wbwi.newIteratorWithBase(base); |
||||||
|
|
||||||
|
it.seek(k1); |
||||||
|
assertThat(it.isValid()).isTrue(); |
||||||
|
assertThat(it.key()).isEqualTo(k1); |
||||||
|
assertThat(it.value()).isEqualTo(v1); |
||||||
|
|
||||||
|
it.seek(k2); |
||||||
|
assertThat(it.isValid()).isTrue(); |
||||||
|
assertThat(it.key()).isEqualTo(k2); |
||||||
|
assertThat(it.value()).isEqualTo(v2); |
||||||
|
|
||||||
|
//put data to the write batch and make sure we can read it.
|
||||||
|
final byte[] k3 = "key3".getBytes(); |
||||||
|
final byte[] v3 = "value3".getBytes(); |
||||||
|
wbwi.put(k3, v3); |
||||||
|
it.seek(k3); |
||||||
|
assertThat(it.isValid()).isTrue(); |
||||||
|
assertThat(it.key()).isEqualTo(k3); |
||||||
|
assertThat(it.value()).isEqualTo(v3); |
||||||
|
|
||||||
|
//update k2 in the write batch and check the value
|
||||||
|
final byte[] v2Other = "otherValue2".getBytes(); |
||||||
|
wbwi.put(k2, v2Other); |
||||||
|
it.seek(k2); |
||||||
|
assertThat(it.isValid()).isTrue(); |
||||||
|
assertThat(it.key()).isEqualTo(k2); |
||||||
|
assertThat(it.value()).isEqualTo(v2Other); |
||||||
|
|
||||||
|
//remove k1 and make sure we can read back the write
|
||||||
|
wbwi.remove(k1); |
||||||
|
it.seek(k1); |
||||||
|
assertThat(it.key()).isNotEqualTo(k1); |
||||||
|
|
||||||
|
//reinsert k1 and make sure we see the new value
|
||||||
|
final byte[] v1Other = "otherValue1".getBytes(); |
||||||
|
wbwi.put(k1, v1Other); |
||||||
|
it.seek(k1); |
||||||
|
assertThat(it.isValid()).isTrue(); |
||||||
|
assertThat(it.key()).isEqualTo(k1); |
||||||
|
assertThat(it.value()).isEqualTo(v1Other); |
||||||
|
} finally { |
||||||
|
if (it != null) { |
||||||
|
it.dispose(); |
||||||
|
} |
||||||
|
if (base != null) { |
||||||
|
base.dispose(); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
} finally { |
||||||
|
if (db != null) { |
||||||
|
db.close(); |
||||||
|
} |
||||||
|
if (options != null) { |
||||||
|
options.dispose(); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
@Test |
||||||
|
public void write_writeBatchWithIndex() throws RocksDBException { |
||||||
|
RocksDB db = null; |
||||||
|
Options options = null; |
||||||
|
try { |
||||||
|
options = new Options(); |
||||||
|
// Setup options
|
||||||
|
options.setCreateIfMissing(true); |
||||||
|
db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); |
||||||
|
|
||||||
|
final byte[] k1 = "key1".getBytes(); |
||||||
|
final byte[] v1 = "value1".getBytes(); |
||||||
|
final byte[] k2 = "key2".getBytes(); |
||||||
|
final byte[] v2 = "value2".getBytes(); |
||||||
|
|
||||||
|
WriteBatchWithIndex wbwi = null; |
||||||
|
|
||||||
|
try { |
||||||
|
wbwi = new WriteBatchWithIndex(); |
||||||
|
|
||||||
|
|
||||||
|
wbwi.put(k1, v1); |
||||||
|
wbwi.put(k2, v2); |
||||||
|
|
||||||
|
db.write(new WriteOptions(), wbwi); |
||||||
|
} finally { |
||||||
|
if(wbwi != null) { |
||||||
|
wbwi.dispose(); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
assertThat(db.get(k1)).isEqualTo(v1); |
||||||
|
assertThat(db.get(k2)).isEqualTo(v2); |
||||||
|
|
||||||
|
} finally { |
||||||
|
if (db != null) { |
||||||
|
db.close(); |
||||||
|
} |
||||||
|
if (options != null) { |
||||||
|
options.dispose(); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
@Test
public void iterator() throws RocksDBException {
  // overwriteKey = true: the index keeps a single (latest) entry per key,
  // so seeks below observe exactly one record per key.
  final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);

  final String k1 = "key1";
  final String v1 = "value1";
  final String k2 = "key2";
  final String v2 = "value2";
  final String k3 = "key3";
  final String v3 = "value3";
  final byte[] k1b = k1.getBytes();
  final byte[] v1b = v1.getBytes();
  final byte[] k2b = k2.getBytes();
  final byte[] v2b = v2.getBytes();
  final byte[] k3b = k3.getBytes();
  final byte[] v3b = v3.getBytes();

  //add put records
  wbwi.put(k1b, v1b);
  wbwi.put(k2b, v2b);
  wbwi.put(k3b, v3b);

  //add a deletion record
  final String k4 = "key4";
  final byte[] k4b = k4.getBytes();
  wbwi.remove(k4b);

  // Entries as the indexed iterator should report them, in key order.
  // A DELETE entry carries no value, hence DirectSlice.NONE for k4.
  WBWIRocksIterator.WriteEntry[] expected = {
      new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
          new DirectSlice(k1), new DirectSlice(v1)),
      new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
          new DirectSlice(k2), new DirectSlice(v2)),
      new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT,
          new DirectSlice(k3), new DirectSlice(v3)),
      new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.DELETE,
          new DirectSlice(k4), DirectSlice.NONE)
  };

  WBWIRocksIterator it = null;
  try {
    it = wbwi.newIterator();

    //direct access - seek to key offsets
    // Deliberately out-of-order offsets to prove seek() is random-access.
    final int[] testOffsets = {2, 0, 1, 3};

    for(int i = 0; i < testOffsets.length; i++) {
      final int testOffset = testOffsets[i];
      final byte[] key = toArray(expected[testOffset].getKey().data());

      it.seek(key);
      assertThat(it.isValid()).isTrue();
      assertThat(it.entry()).isEqualTo(expected[testOffset]);
    }

    //forward iterative access
    int i = 0;
    for(it.seekToFirst(); it.isValid(); it.next()) {
      assertThat(it.entry()).isEqualTo(expected[i++]);
    }

    //reverse iterative access
    i = expected.length - 1;
    for(it.seekToLast(); it.isValid(); it.prev()) {
      assertThat(it.entry()).isEqualTo(expected[i--]);
    }

  } finally {
    // The iterator wraps a native object and must be disposed explicitly.
    if(it != null) {
      it.dispose();
    }
  }
}
||||||
|
|
||||||
|
private byte[] toArray(final ByteBuffer buf) { |
||||||
|
final byte[] ary = new byte[buf.remaining()]; |
||||||
|
buf.get(ary); |
||||||
|
return ary; |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,378 @@ |
|||||||
|
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
|
||||||
|
// This source code is licensed under the BSD-style license found in the
|
||||||
|
// LICENSE file in the root directory of this source tree. An additional grant
|
||||||
|
// of patent rights can be found in the PATENTS file in the same directory.
|
||||||
|
//
|
||||||
|
// This file implements the "bridge" between Java and C++ and enables
|
||||||
|
// calling c++ rocksdb::WriteBatchWithIndex methods from Java side.
|
||||||
|
|
||||||
|
#include "include/org_rocksdb_WBWIRocksIterator.h" |
||||||
|
#include "include/org_rocksdb_WriteBatchWithIndex.h" |
||||||
|
#include "rocksdb/comparator.h" |
||||||
|
#include "rocksdb/utilities/write_batch_with_index.h" |
||||||
|
#include "rocksjni/portal.h" |
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WriteBatchWithIndex
 * Method:    newWriteBatchWithIndex
 * Signature: ()V
 */
// Allocates a default-configured native WriteBatchWithIndex and stores its
// pointer in the Java object's handle field; ownership passes to the Java
// side (released later via disposeInternal).
void Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__(
    JNIEnv* env, jobject jobj) {
  rocksdb::WriteBatchWithIndex* wbwi = new rocksdb::WriteBatchWithIndex();
  rocksdb::WriteBatchWithIndexJni::setHandle(env, jobj, wbwi);
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WriteBatchWithIndex
 * Method:    newWriteBatchWithIndex
 * Signature: (Z)V
 */
// Overload taking only overwrite_key: uses the bytewise comparator and
// 0 reserved bytes. overwrite_key=true makes the index keep a single
// (latest) entry per key.
void Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__Z(
    JNIEnv* env, jobject jobj, jboolean joverwrite_key) {
  rocksdb::WriteBatchWithIndex* wbwi =
      new rocksdb::WriteBatchWithIndex(rocksdb::BytewiseComparator(), 0,
          static_cast<bool>(joverwrite_key));
  rocksdb::WriteBatchWithIndexJni::setHandle(env, jobj, wbwi);
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WriteBatchWithIndex
 * Method:    newWriteBatchWithIndex
 * Signature: (JIZ)V
 */
// Fully-parameterized constructor: the jlong is the native handle of a
// fallback index Comparator owned by the Java side (not by this object).
void Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__JIZ(
    JNIEnv* env, jobject jobj, jlong jfallback_index_comparator_handle,
    jint jreserved_bytes, jboolean joverwrite_key) {
  rocksdb::WriteBatchWithIndex* wbwi =
      new rocksdb::WriteBatchWithIndex(
          reinterpret_cast<rocksdb::Comparator*>(jfallback_index_comparator_handle),
          static_cast<size_t>(jreserved_bytes), static_cast<bool>(joverwrite_key));
  rocksdb::WriteBatchWithIndexJni::setHandle(env, jobj, wbwi);
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WriteBatchWithIndex
 * Method:    count
 * Signature: ()I
 */
// Returns the number of records in the UNDERLYING write batch
// (GetWriteBatch()->Count()), not the number of index entries.
jint Java_org_rocksdb_WriteBatchWithIndex_count0(
    JNIEnv* env, jobject jobj) {
  rocksdb::WriteBatchWithIndex* wbwi =
      rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
  assert(wbwi != nullptr);

  return static_cast<jint>(wbwi->GetWriteBatch()->Count());
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WriteBatchWithIndex
 * Method:    put
 * Signature: ([BI[BI)V
 */
// Put into the default column family. The byte-array pinning/unpinning and
// Slice construction are delegated to JniUtil::kv_op, which invokes the
// lambda with ready-made key/value slices.
void Java_org_rocksdb_WriteBatchWithIndex_put___3BI_3BI(
    JNIEnv* env, jobject jobj, jbyteArray jkey, jint jkey_len,
    jbyteArray jentry_value, jint jentry_value_len) {
  auto* wbwi =
      rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
  assert(wbwi != nullptr);
  auto put = [&wbwi] (rocksdb::Slice key, rocksdb::Slice value) {
    wbwi->Put(key, value);
  };
  rocksdb::JniUtil::kv_op(put, env, jobj, jkey, jkey_len, jentry_value,
      jentry_value_len);
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WriteBatchWithIndex
 * Method:    put
 * Signature: ([BI[BIJ)V
 */
// Put into an explicit column family identified by its native handle.
void Java_org_rocksdb_WriteBatchWithIndex_put___3BI_3BIJ(
    JNIEnv* env, jobject jobj, jbyteArray jkey, jint jkey_len,
    jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) {
  auto* wbwi =
      rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
  assert(wbwi != nullptr);
  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
  assert(cf_handle != nullptr);
  auto put = [&wbwi, &cf_handle] (rocksdb::Slice key, rocksdb::Slice value) {
    wbwi->Put(cf_handle, key, value);
  };
  rocksdb::JniUtil::kv_op(put, env, jobj, jkey, jkey_len, jentry_value,
      jentry_value_len);
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WriteBatchWithIndex
 * Method:    merge
 * Signature: ([BI[BI)V
 */
// Merge into the default column family; mirrors put___3BI_3BI but calls
// WriteBatchWithIndex::Merge.
void Java_org_rocksdb_WriteBatchWithIndex_merge___3BI_3BI(
    JNIEnv* env, jobject jobj, jbyteArray jkey, jint jkey_len,
    jbyteArray jentry_value, jint jentry_value_len) {
  auto* wbwi =
      rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
  assert(wbwi != nullptr);
  auto merge = [&wbwi] (rocksdb::Slice key, rocksdb::Slice value) {
    wbwi->Merge(key, value);
  };
  rocksdb::JniUtil::kv_op(merge, env, jobj, jkey, jkey_len, jentry_value,
      jentry_value_len);
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WriteBatchWithIndex
 * Method:    merge
 * Signature: ([BI[BIJ)V
 */
// Merge into an explicit column family identified by its native handle.
void Java_org_rocksdb_WriteBatchWithIndex_merge___3BI_3BIJ(
    JNIEnv* env, jobject jobj, jbyteArray jkey, jint jkey_len,
    jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) {
  auto* wbwi =
      rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
  assert(wbwi != nullptr);
  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
  assert(cf_handle != nullptr);
  auto merge = [&wbwi, &cf_handle] (rocksdb::Slice key, rocksdb::Slice value) {
    wbwi->Merge(cf_handle, key, value);
  };
  rocksdb::JniUtil::kv_op(merge, env, jobj, jkey, jkey_len, jentry_value,
      jentry_value_len);
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WriteBatchWithIndex
 * Method:    remove
 * Signature: ([BI)V
 */
// Java "remove" maps to the native Delete operation (default column family).
// Key pinning is handled by JniUtil::k_op.
void Java_org_rocksdb_WriteBatchWithIndex_remove___3BI(
    JNIEnv* env, jobject jobj, jbyteArray jkey, jint jkey_len) {
  auto* wbwi =
      rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
  assert(wbwi != nullptr);
  auto remove = [&wbwi] (rocksdb::Slice key) {
    wbwi->Delete(key);
  };
  rocksdb::JniUtil::k_op(remove, env, jobj, jkey, jkey_len);
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WriteBatchWithIndex
 * Method:    remove
 * Signature: ([BIJ)V
 */
// Delete from an explicit column family identified by its native handle.
void Java_org_rocksdb_WriteBatchWithIndex_remove___3BIJ(
    JNIEnv* env, jobject jobj,
    jbyteArray jkey, jint jkey_len, jlong jcf_handle) {
  auto* wbwi =
      rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
  assert(wbwi != nullptr);
  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
  assert(cf_handle != nullptr);
  auto remove = [&wbwi, &cf_handle] (rocksdb::Slice key) {
    wbwi->Delete(cf_handle, key);
  };
  rocksdb::JniUtil::k_op(remove, env, jobj, jkey, jkey_len);
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WriteBatchWithIndex
 * Method:    putLogData
 * Signature: ([BI)V
 */
// Appends a log-data blob to the batch; log data is WAL-only and is not
// applied to the database (see WriteBatch::PutLogData).
void Java_org_rocksdb_WriteBatchWithIndex_putLogData(
    JNIEnv* env, jobject jobj, jbyteArray jblob, jint jblob_len) {
  auto* wbwi =
      rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
  assert(wbwi != nullptr);
  auto putLogData = [&wbwi] (rocksdb::Slice blob) {
    wbwi->PutLogData(blob);
  };
  rocksdb::JniUtil::k_op(putLogData, env, jobj, jblob, jblob_len);
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WriteBatchWithIndex
 * Method:    clear
 * Signature: ()V
 */
// Clears the records of the UNDERLYING write batch.
// NOTE(review): this calls GetWriteBatch()->Clear(); whether the index
// entries of the WriteBatchWithIndex are also reset is not visible here —
// confirm against the WriteBatchWithIndex implementation.
void Java_org_rocksdb_WriteBatchWithIndex_clear0(
    JNIEnv* env, jobject jobj) {
  rocksdb::WriteBatchWithIndex* wbwi =
      rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
  assert(wbwi != nullptr);

  wbwi->GetWriteBatch()->Clear();
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WriteBatchWithIndex
 * Method:    iterator0
 * Signature: ()J
 */
// Creates a WBWIIterator over the default column family and returns the raw
// pointer as a jlong; the Java caller takes ownership and must dispose it.
jlong Java_org_rocksdb_WriteBatchWithIndex_iterator0(
    JNIEnv* env, jobject jobj) {
  rocksdb::WriteBatchWithIndex* wbwi =
      rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
  rocksdb::WBWIIterator* wbwi_iterator = wbwi->NewIterator();
  return reinterpret_cast<jlong>(wbwi_iterator);
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WriteBatchWithIndex
 * Method:    iterator1
 * Signature: (J)J
 */
// Same as iterator0 but scoped to the column family whose native handle is
// passed in; ownership of the returned iterator transfers to the Java side.
jlong Java_org_rocksdb_WriteBatchWithIndex_iterator1(
    JNIEnv* env, jobject jobj, jlong jcf_handle) {
  rocksdb::WriteBatchWithIndex* wbwi =
      rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
  rocksdb::WBWIIterator* wbwi_iterator = wbwi->NewIterator(cf_handle);
  return reinterpret_cast<jlong>(wbwi_iterator);
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WriteBatchWithIndex
 * Method:    iteratorWithBase
 * Signature: (JJ)J
 */
// Wraps a database (base) iterator so reads see the batch's pending updates
// overlaid on the base data. Per NewIteratorWithBase, the returned iterator
// takes ownership of base_iterator.
jlong Java_org_rocksdb_WriteBatchWithIndex_iteratorWithBase(
    JNIEnv* env, jobject jobj, jlong jcf_handle, jlong jbi_handle) {
  rocksdb::WriteBatchWithIndex* wbwi =
      rocksdb::WriteBatchWithIndexJni::getHandle(env, jobj);
  auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle);
  auto* base_iterator = reinterpret_cast<rocksdb::Iterator*>(jbi_handle);
  auto* iterator = wbwi->NewIteratorWithBase(cf_handle, base_iterator);
  return reinterpret_cast<jlong>(iterator);
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WriteBatchWithIndex
 * Method:    disposeInternal
 * Signature: (J)V
 */
// Frees the native WriteBatchWithIndex owned by the Java wrapper.
void Java_org_rocksdb_WriteBatchWithIndex_disposeInternal(
    JNIEnv* env, jobject jobj, jlong handle) {
  auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(handle);
  delete wbwi;
}
||||||
|
|
||||||
|
/* WBWIRocksIterator below */ |
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WBWIRocksIterator
 * Method:    disposeInternal
 * Signature: (J)V
 */
// Frees the native WBWIIterator owned by the Java WBWIRocksIterator.
void Java_org_rocksdb_WBWIRocksIterator_disposeInternal(
    JNIEnv* env, jobject jobj, jlong handle) {
  auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle);
  delete it;
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WBWIRocksIterator
 * Method:    isValid0
 * Signature: (J)Z
 */
// Thin forwarder to WBWIIterator::Valid().
jboolean Java_org_rocksdb_WBWIRocksIterator_isValid0(
    JNIEnv* env, jobject jobj, jlong handle) {
  return reinterpret_cast<rocksdb::WBWIIterator*>(handle)->Valid();
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WBWIRocksIterator
 * Method:    seekToFirst0
 * Signature: (J)V
 */
// Thin forwarder to WBWIIterator::SeekToFirst().
void Java_org_rocksdb_WBWIRocksIterator_seekToFirst0(
    JNIEnv* env, jobject jobj, jlong handle) {
  reinterpret_cast<rocksdb::WBWIIterator*>(handle)->SeekToFirst();
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WBWIRocksIterator
 * Method:    seekToLast0
 * Signature: (J)V
 */
// Thin forwarder to WBWIIterator::SeekToLast().
void Java_org_rocksdb_WBWIRocksIterator_seekToLast0(
    JNIEnv* env, jobject jobj, jlong handle) {
  reinterpret_cast<rocksdb::WBWIIterator*>(handle)->SeekToLast();
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WBWIRocksIterator
 * Method:    next0
 * Signature: (J)V
 */
// Thin forwarder to WBWIIterator::Next().
void Java_org_rocksdb_WBWIRocksIterator_next0(
    JNIEnv* env, jobject jobj, jlong handle) {
  reinterpret_cast<rocksdb::WBWIIterator*>(handle)->Next();
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WBWIRocksIterator
 * Method:    prev0
 * Signature: (J)V
 */
// Thin forwarder to WBWIIterator::Prev().
void Java_org_rocksdb_WBWIRocksIterator_prev0(
    JNIEnv* env, jobject jobj, jlong handle) {
  reinterpret_cast<rocksdb::WBWIIterator*>(handle)->Prev();
}
||||||
|
|
||||||
|
/*
|
||||||
|
* Class: org_rocksdb_WBWIRocksIterator |
||||||
|
* Method: seek0 |
||||||
|
* Signature: (J[BI)V |
||||||
|
*/ |
||||||
|
void Java_org_rocksdb_WBWIRocksIterator_seek0( |
||||||
|
JNIEnv* env, jobject jobj, jlong handle, jbyteArray jtarget, |
||||||
|
jint jtarget_len) { |
||||||
|
auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle); |
||||||
|
jbyte* target = env->GetByteArrayElements(jtarget, 0); |
||||||
|
rocksdb::Slice target_slice( |
||||||
|
reinterpret_cast<char*>(target), jtarget_len); |
||||||
|
|
||||||
|
it->Seek(target_slice); |
||||||
|
|
||||||
|
env->ReleaseByteArrayElements(jtarget, target, JNI_ABORT); |
||||||
|
} |
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WBWIRocksIterator
 * Method:    status0
 * Signature: (J)V
 */
// Checks the iterator's status; on a non-OK status a RocksDBException is
// raised on the Java side, otherwise the call is a no-op.
void Java_org_rocksdb_WBWIRocksIterator_status0(
    JNIEnv* env, jobject jobj, jlong handle) {
  auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle);
  rocksdb::Status s = it->status();

  if (s.ok()) {
    return;
  }

  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
}
||||||
|
|
||||||
|
/*
 * Class:     org_rocksdb_WBWIRocksIterator
 * Method:    entry1
 * Signature: (JLorg/rocksdb/WBWIRocksIterator/WriteEntry;)V
 */
// Copies the iterator's current WriteEntry (type, key, value) into the Java
// WriteEntry obtained from the Java iterator object via the portal.
// NOTE(review): the jwrite_entry parameter is unused here — the target
// entry is fetched from jobj instead; confirm that is intentional.
void Java_org_rocksdb_WBWIRocksIterator_entry1(
    JNIEnv* env, jobject jobj, jlong handle, jobject jwrite_entry) {
  auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle);
  const rocksdb::WriteEntry& we = it->Entry();
  jobject jwe = rocksdb::WBWIRocksIteratorJni::getWriteEntry(env, jobj);
  rocksdb::WriteEntryJni::setWriteType(env, jwe, we.type);
  rocksdb::WriteEntryJni::setKey(env, jwe, &we.key);
  if (we.type == rocksdb::kDeleteRecord || we.type == rocksdb::kLogDataRecord) {
    // set native handle of value slice to null if no value available
    rocksdb::WriteEntryJni::setValue(env, jwe, NULL);
  } else {
    rocksdb::WriteEntryJni::setValue(env, jwe, &we.value);
  }
}
Loading…
Reference in new issue