Conflicts: db/db_impl.cc db/db_impl.h db/memtable_list.cc db/version_set.ccmain
commit
3d2fe844ab
@ -0,0 +1,121 @@ |
||||
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
import java.lang.*; |
||||
import java.util.*; |
||||
|
||||
/** |
||||
* WriteBatch holds a collection of updates to apply atomically to a DB. |
||||
* |
||||
* The updates are applied in the order in which they are added |
||||
* to the WriteBatch. For example, the value of "key" will be "v3" |
||||
* after the following batch is written: |
||||
* |
||||
* batch.put("key", "v1"); |
||||
* batch.remove("key"); |
||||
* batch.put("key", "v2"); |
||||
* batch.put("key", "v3"); |
||||
* |
||||
* Multiple threads can invoke const methods on a WriteBatch without |
||||
* external synchronization, but if any of the threads may call a |
||||
* non-const method, all threads accessing the same WriteBatch must use |
||||
* external synchronization. |
||||
*/ |
||||
public class WriteBatch {
  /**
   * Constructs a WriteBatch with the default initial native buffer size.
   */
  public WriteBatch() {
    nativeHandle_ = 0;
    newWriteBatch(0);
  }

  /**
   * Constructs a WriteBatch whose native buffer pre-reserves the given
   * number of bytes.
   *
   * @param reserved_bytes number of bytes to reserve in the native batch.
   */
  public WriteBatch(int reserved_bytes) {
    nativeHandle_ = 0;
    newWriteBatch(reserved_bytes);
  }

  /**
   * Returns the number of updates in the batch.
   */
  public native int count();

  /**
   * Store the mapping "key->value" in the database.
   */
  public void put(byte[] key, byte[] value) {
    put(key, key.length, value, value.length);
  }

  /**
   * Merge "value" with the existing value of "key" in the database.
   * "key->merge(existing, value)"
   */
  public void merge(byte[] key, byte[] value) {
    merge(key, key.length, value, value.length);
  }

  /**
   * If the database contains a mapping for "key", erase it.  Else do nothing.
   */
  public void remove(byte[] key) {
    remove(key, key.length);
  }

  /**
   * Append a blob of arbitrary size to the records in this batch. The blob will
   * be stored in the transaction log but not in any other file. In particular,
   * it will not be persisted to the SST files. When iterating over this
   * WriteBatch, WriteBatch::Handler::LogData will be called with the contents
   * of the blob as it is encountered. Blobs, puts, deletes, and merges will be
   * encountered in the same order in which they were inserted. The blob will
   * NOT consume sequence number(s) and will NOT increase the count of the batch
   *
   * Example application: add timestamps to the transaction log for use in
   * replication.
   */
  public void putLogData(byte[] blob) {
    putLogData(blob, blob.length);
  }

  /**
   * Clear all updates buffered in this batch
   */
  public native void clear();

  /**
   * Delete the c++ side pointer.
   */
  public synchronized void dispose() {
    // dispose0() is expected to null out nativeHandle_ on the native side,
    // so a later finalize() does not free the native object twice.
    if (nativeHandle_ != 0) {
      dispose0();
    }
  }

  @Override protected void finalize() {
    // Safety net: releases the native WriteBatch if dispose() was never called.
    dispose();
  }

  // Native counterparts of the public wrappers above; lengths are passed
  // explicitly because JNI reads raw byte arrays.
  private native void newWriteBatch(int reserved_bytes);
  private native void put(byte[] key, int keyLen,
                          byte[] value, int valueLen);
  private native void merge(byte[] key, int keyLen,
                            byte[] value, int valueLen);
  private native void remove(byte[] key, int keyLen);
  private native void putLogData(byte[] blob, int blobLen);
  private native void dispose0();

  // Address of the C++ rocksdb::WriteBatch this object wraps (0 = none).
  private long nativeHandle_;
}
||||
|
||||
/** |
||||
* Package-private class which provides java api to access |
||||
* c++ WriteBatchInternal. |
||||
*/ |
||||
class WriteBatchInternal {
  // Overwrites the starting sequence number stored in the batch header.
  static native void setSequence(WriteBatch batch, long sn);
  // Reads the starting sequence number from the batch header.
  static native long sequence(WriteBatch batch);
  // Appends all records of b2 to the end of b1 (b2 is left unchanged).
  static native void append(WriteBatch b1, WriteBatch b2);
}
||||
|
@ -0,0 +1,125 @@ |
||||
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
//
|
||||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
||||
package org.rocksdb; |
||||
|
||||
import java.util.*; |
||||
import java.lang.*; |
||||
import java.io.UnsupportedEncodingException; |
||||
|
||||
/** |
||||
* This class mimics the db/write_batch_test.cc in the c++ rocksdb library. |
||||
*/ |
||||
public class WriteBatchTest {
  static {
    // The JNI methods (count, getContents, WriteBatchInternal.*) live in
    // the rocksdbjni shared library.
    System.loadLibrary("rocksdbjni");
  }

  // Entry point: runs each ported test case in sequence. Note that the
  // assert statements below only fire when the JVM runs with -ea.
  public static void main(String args[]) {
    System.out.println("Testing WriteBatchTest.Empty ===");
    Empty();

    System.out.println("Testing WriteBatchTest.Multiple ===");
    Multiple();

    System.out.println("Testing WriteBatchTest.Append ===");
    Append();

    System.out.println("Testing WriteBatchTest.Blob ===");
    Blob();

    // The following tests have not yet been ported.
    // Continue();
    // PutGatherSlices();

    System.out.println("Passed all WriteBatchTest!");
  }

  // A freshly-constructed batch contains no updates.
  static void Empty() {
    WriteBatch batch = new WriteBatch();
    assert(batch.count() == 0);
  }

  // Three updates get consecutive sequence numbers starting at the batch's
  // base sequence; getContents() returns entries in memtable (sorted) order.
  static void Multiple() {
    try {
      WriteBatch batch = new WriteBatch();
      batch.put("foo".getBytes("US-ASCII"), "bar".getBytes("US-ASCII"));
      batch.remove("box".getBytes("US-ASCII"));
      batch.put("baz".getBytes("US-ASCII"), "boo".getBytes("US-ASCII"));
      WriteBatchInternal.setSequence(batch, 100);
      assert(100 == WriteBatchInternal.sequence(batch));
      assert(3 == batch.count());
      assert(new String("Put(baz, boo)@102" +
                        "Delete(box)@101" +
                        "Put(foo, bar)@100")
          .equals(new String(getContents(batch), "US-ASCII")));
    } catch (UnsupportedEncodingException e) {
      System.err.println(e);
      assert(false);
    }
  }

  // Appending preserves the destination batch's base sequence number and
  // accumulates counts across repeated appends.
  static void Append() {
    WriteBatch b1 = new WriteBatch();
    WriteBatch b2 = new WriteBatch();
    WriteBatchInternal.setSequence(b1, 200);
    WriteBatchInternal.setSequence(b2, 300);
    WriteBatchInternal.append(b1, b2);
    assert(getContents(b1).length == 0);
    assert(b1.count() == 0);
    try {
      b2.put("a".getBytes("US-ASCII"), "va".getBytes("US-ASCII"));
      WriteBatchInternal.append(b1, b2);
      assert("Put(a, va)@200".equals(new String(getContents(b1), "US-ASCII")));
      assert(1 == b1.count());
      b2.clear();
      b2.put("b".getBytes("US-ASCII"), "vb".getBytes("US-ASCII"));
      WriteBatchInternal.append(b1, b2);
      assert(new String("Put(a, va)@200" +
                        "Put(b, vb)@201")
          .equals(new String(getContents(b1), "US-ASCII")));
      assert(2 == b1.count());
      b2.remove("foo".getBytes("US-ASCII"));
      WriteBatchInternal.append(b1, b2);
      assert(new String("Put(a, va)@200" +
                        "Put(b, vb)@202" +
                        "Put(b, vb)@201" +
                        "Delete(foo)@203")
          .equals(new String(getContents(b1), "US-ASCII")));
      assert(4 == b1.count());
    } catch (UnsupportedEncodingException e) {
      System.err.println(e);
      assert(false);
    }
  }

  // Log-data blobs are interleaved with normal records but consume no
  // sequence numbers and do not show up in count() or getContents().
  static void Blob() {
    WriteBatch batch = new WriteBatch();
    try {
      batch.put("k1".getBytes("US-ASCII"), "v1".getBytes("US-ASCII"));
      batch.put("k2".getBytes("US-ASCII"), "v2".getBytes("US-ASCII"));
      batch.put("k3".getBytes("US-ASCII"), "v3".getBytes("US-ASCII"));
      batch.putLogData("blob1".getBytes("US-ASCII"));
      batch.remove("k2".getBytes("US-ASCII"));
      batch.putLogData("blob2".getBytes("US-ASCII"));
      batch.merge("foo".getBytes("US-ASCII"), "bar".getBytes("US-ASCII"));
      assert(5 == batch.count());
      assert(new String("Merge(foo, bar)@4" +
                        "Put(k1, v1)@0" +
                        "Delete(k2)@3" +
                        "Put(k2, v2)@1" +
                        "Put(k3, v3)@2")
          .equals(new String(getContents(batch), "US-ASCII")));
    } catch (UnsupportedEncodingException e) {
      System.err.println(e);
      assert(false);
    }
  }

  // Renders the batch contents by replaying it into a native memtable;
  // implemented in rocksjni/write_batch.cc.
  static native byte[] getContents(WriteBatch batch);
}
@ -0,0 +1,96 @@ |
||||
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
package org.rocksdb; |
||||
|
||||
/** |
||||
* Options that control write operations. |
||||
* |
||||
* Note that developers should call WriteOptions.dispose() to release the |
||||
* c++ side memory before a WriteOptions instance runs out of scope. |
||||
*/ |
||||
public class WriteOptions {
  /**
   * Constructs a WriteOptions instance backed by a native
   * rocksdb::WriteOptions object.
   */
  public WriteOptions() {
    nativeHandle_ = 0;
    newWriteOptions();
  }

  /**
   * Release the c++ side memory held by this instance.
   *
   * Safe to call more than once; subsequent calls are no-ops.
   */
  public synchronized void dispose() {
    if (nativeHandle_ != 0) {
      dispose0(nativeHandle_);
      // Fix: zero the handle so a repeated dispose() call (or finalize()
      // after an explicit dispose()) cannot free the native object twice.
      nativeHandle_ = 0;
    }
  }

  /**
   * If true, the write will be flushed from the operating system
   * buffer cache (by calling WritableFile::Sync()) before the write
   * is considered complete. If this flag is true, writes will be
   * slower.
   *
   * If this flag is false, and the machine crashes, some recent
   * writes may be lost. Note that if it is just the process that
   * crashes (i.e., the machine does not reboot), no writes will be
   * lost even if sync==false.
   *
   * In other words, a DB write with sync==false has similar
   * crash semantics as the "write()" system call. A DB write
   * with sync==true has similar crash semantics to a "write()"
   * system call followed by "fdatasync()".
   *
   * Default: false
   *
   * @param flag whether writes should be synced to stable storage.
   */
  public void setSync(boolean flag) {
    setSync(nativeHandle_, flag);
  }

  /**
   * Returns whether writes will be synced to stable storage before
   * being considered complete. See {@link #setSync(boolean)} for the
   * full crash-semantics discussion.
   */
  public boolean sync() {
    return sync(nativeHandle_);
  }

  /**
   * If true, writes will not first go to the write ahead log,
   * and the write may get lost after a crash.
   *
   * @param flag whether to bypass the write-ahead log.
   */
  public void setDisableWAL(boolean flag) {
    setDisableWAL(nativeHandle_, flag);
  }

  /**
   * Returns true if writes bypass the write-ahead log (and thus may be
   * lost after a crash).
   */
  public boolean disableWAL() {
    return disableWAL(nativeHandle_);
  }

  @Override protected void finalize() {
    // Safety net: releases the native object if dispose() was never called.
    dispose();
  }

  private native void newWriteOptions();
  private native void setSync(long handle, boolean flag);
  private native boolean sync(long handle);
  private native void setDisableWAL(long handle, boolean flag);
  private native boolean disableWAL(long handle);
  private native void dispose0(long handle);

  // Address of the C++ rocksdb::WriteOptions this object wraps (0 = none).
  protected long nativeHandle_;
}
@ -0,0 +1,263 @@ |
||||
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
//
|
||||
// This file implements the "bridge" between Java and C++ and enables
|
||||
// calling c++ rocksdb::WriteBatch methods from Java side.
|
||||
#include <memory> |
||||
|
||||
#include "include/org_rocksdb_WriteBatch.h" |
||||
#include "include/org_rocksdb_WriteBatchInternal.h" |
||||
#include "include/org_rocksdb_WriteBatchTest.h" |
||||
#include "rocksjni/portal.h" |
||||
#include "rocksdb/db.h" |
||||
#include "db/memtable.h" |
||||
#include "rocksdb/write_batch.h" |
||||
#include "db/write_batch_internal.h" |
||||
#include "rocksdb/env.h" |
||||
#include "rocksdb/memtablerep.h" |
||||
#include "util/logging.h" |
||||
#include "util/testharness.h" |
||||
|
||||
/*
|
||||
* Class: org_rocksdb_WriteBatch |
||||
* Method: newWriteBatch |
||||
* Signature: (I)V |
||||
*/ |
||||
void Java_org_rocksdb_WriteBatch_newWriteBatch(
    JNIEnv* env, jobject jobj, jint jreserved_bytes) {
  // Allocate the native batch with the requested pre-reserved capacity and
  // store its address in the Java object's handle field.
  auto* batch =
      new rocksdb::WriteBatch(static_cast<size_t>(jreserved_bytes));
  rocksdb::WriteBatchJni::setHandle(env, jobj, batch);
}
||||
|
||||
/*
|
||||
* Class: org_rocksdb_WriteBatch |
||||
* Method: count |
||||
* Signature: ()I |
||||
*/ |
||||
jint Java_org_rocksdb_WriteBatch_count(JNIEnv* env, jobject jobj) {
  // Fetch the native batch behind this Java object and report its size.
  auto* batch = rocksdb::WriteBatchJni::getHandle(env, jobj);
  assert(batch != nullptr);
  return static_cast<jint>(batch->Count());
}
||||
|
||||
/*
|
||||
* Class: org_rocksdb_WriteBatch |
||||
* Method: clear |
||||
* Signature: ()V |
||||
*/ |
||||
void Java_org_rocksdb_WriteBatch_clear(JNIEnv* env, jobject jobj) {
  // Discard every buffered update in the native batch.
  auto* batch = rocksdb::WriteBatchJni::getHandle(env, jobj);
  assert(batch != nullptr);
  batch->Clear();
}
||||
|
||||
/*
|
||||
* Class: org_rocksdb_WriteBatch |
||||
* Method: put |
||||
* Signature: ([BI[BI)V |
||||
*/ |
||||
void Java_org_rocksdb_WriteBatch_put(
    JNIEnv* env, jobject jobj,
    jbyteArray jkey, jint jkey_len,
    jbyteArray jvalue, jint jvalue_len) {
  auto* batch = rocksdb::WriteBatchJni::getHandle(env, jobj);
  assert(batch != nullptr);

  // Pin the Java byte arrays, wrap them in slices, and record the update.
  jbyte* key_bytes = env->GetByteArrayElements(jkey, nullptr);
  jbyte* value_bytes = env->GetByteArrayElements(jvalue, nullptr);
  batch->Put(
      rocksdb::Slice(reinterpret_cast<char*>(key_bytes), jkey_len),
      rocksdb::Slice(reinterpret_cast<char*>(value_bytes), jvalue_len));
  // JNI_ABORT: the arrays were only read, so skip the copy-back.
  env->ReleaseByteArrayElements(jvalue, value_bytes, JNI_ABORT);
  env->ReleaseByteArrayElements(jkey, key_bytes, JNI_ABORT);
}
||||
|
||||
/*
|
||||
* Class: org_rocksdb_WriteBatch |
||||
* Method: merge |
||||
* Signature: ([BI[BI)V |
||||
*/ |
||||
JNIEXPORT void JNICALL Java_org_rocksdb_WriteBatch_merge(
    JNIEnv* env, jobject jobj,
    jbyteArray jkey, jint jkey_len,
    jbyteArray jvalue, jint jvalue_len) {
  auto* batch = rocksdb::WriteBatchJni::getHandle(env, jobj);
  assert(batch != nullptr);

  // Pin the Java byte arrays, wrap them in slices, and record the merge.
  jbyte* key_bytes = env->GetByteArrayElements(jkey, nullptr);
  jbyte* value_bytes = env->GetByteArrayElements(jvalue, nullptr);
  batch->Merge(
      rocksdb::Slice(reinterpret_cast<char*>(key_bytes), jkey_len),
      rocksdb::Slice(reinterpret_cast<char*>(value_bytes), jvalue_len));
  // JNI_ABORT: the arrays were only read, so skip the copy-back.
  env->ReleaseByteArrayElements(jvalue, value_bytes, JNI_ABORT);
  env->ReleaseByteArrayElements(jkey, key_bytes, JNI_ABORT);
}
||||
|
||||
/*
|
||||
* Class: org_rocksdb_WriteBatch |
||||
* Method: remove |
||||
* Signature: ([BI)V |
||||
*/ |
||||
JNIEXPORT void JNICALL Java_org_rocksdb_WriteBatch_remove(
    JNIEnv* env, jobject jobj,
    jbyteArray jkey, jint jkey_len) {
  auto* batch = rocksdb::WriteBatchJni::getHandle(env, jobj);
  assert(batch != nullptr);

  // Pin the key bytes and record the deletion.
  jbyte* key_bytes = env->GetByteArrayElements(jkey, nullptr);
  batch->Delete(rocksdb::Slice(reinterpret_cast<char*>(key_bytes), jkey_len));
  // JNI_ABORT: the array was only read, so skip the copy-back.
  env->ReleaseByteArrayElements(jkey, key_bytes, JNI_ABORT);
}
||||
|
||||
/*
|
||||
* Class: org_rocksdb_WriteBatch |
||||
* Method: putLogData |
||||
* Signature: ([BI)V |
||||
*/ |
||||
void Java_org_rocksdb_WriteBatch_putLogData(
    JNIEnv* env, jobject jobj, jbyteArray jblob, jint jblob_len) {
  auto* batch = rocksdb::WriteBatchJni::getHandle(env, jobj);
  assert(batch != nullptr);

  // Pin the blob bytes and append them as a log-only record.
  jbyte* blob_bytes = env->GetByteArrayElements(jblob, nullptr);
  batch->PutLogData(
      rocksdb::Slice(reinterpret_cast<char*>(blob_bytes), jblob_len));
  // JNI_ABORT: the array was only read, so skip the copy-back.
  env->ReleaseByteArrayElements(jblob, blob_bytes, JNI_ABORT);
}
||||
|
||||
/*
|
||||
* Class: org_rocksdb_WriteBatch |
||||
* Method: dispose0 |
||||
* Signature: ()V |
||||
*/ |
||||
void Java_org_rocksdb_WriteBatch_dispose0(JNIEnv* env, jobject jobj) {
  auto* batch = rocksdb::WriteBatchJni::getHandle(env, jobj);
  assert(batch != nullptr);
  delete batch;
  // Null out the Java-side handle so a later dispose()/finalize() is a no-op.
  rocksdb::WriteBatchJni::setHandle(env, jobj, nullptr);
}
||||
|
||||
/*
|
||||
* Class: org_rocksdb_WriteBatchInternal |
||||
* Method: setSequence |
||||
* Signature: (Lorg/rocksdb/WriteBatch;J)V |
||||
*/ |
||||
void Java_org_rocksdb_WriteBatchInternal_setSequence(
    JNIEnv* env, jclass jclazz, jobject jobj, jlong jsn) {
  // Overwrite the base sequence number stored in the batch header.
  auto* batch = rocksdb::WriteBatchJni::getHandle(env, jobj);
  assert(batch != nullptr);
  rocksdb::WriteBatchInternal::SetSequence(
      batch, static_cast<rocksdb::SequenceNumber>(jsn));
}
||||
|
||||
/*
|
||||
* Class: org_rocksdb_WriteBatchInternal |
||||
* Method: sequence |
||||
* Signature: (Lorg/rocksdb/WriteBatch;)J |
||||
*/ |
||||
jlong Java_org_rocksdb_WriteBatchInternal_sequence(
    JNIEnv* env, jclass jclazz, jobject jobj) {
  // Read back the base sequence number from the batch header.
  auto* batch = rocksdb::WriteBatchJni::getHandle(env, jobj);
  assert(batch != nullptr);
  return static_cast<jlong>(rocksdb::WriteBatchInternal::Sequence(batch));
}
||||
|
||||
/*
|
||||
* Class: org_rocksdb_WriteBatchInternal |
||||
* Method: append |
||||
* Signature: (Lorg/rocksdb/WriteBatch;Lorg/rocksdb/WriteBatch;)V |
||||
*/ |
||||
void Java_org_rocksdb_WriteBatchInternal_append(
    JNIEnv* env, jclass jclazz, jobject jwb1, jobject jwb2) {
  // Append all records of the second batch to the first.
  auto* dst = rocksdb::WriteBatchJni::getHandle(env, jwb1);
  assert(dst != nullptr);
  auto* src = rocksdb::WriteBatchJni::getHandle(env, jwb2);
  assert(src != nullptr);
  rocksdb::WriteBatchInternal::Append(dst, src);
}
||||
|
||||
/*
|
||||
* Class: org_rocksdb_WriteBatchTest |
||||
* Method: getContents |
||||
* Signature: (Lorg/rocksdb/WriteBatch;)[B |
||||
*/ |
||||
// Renders the contents of a WriteBatch as a human-readable byte string,
// e.g. "Put(foo, bar)@100Delete(box)@101", by replaying the batch into a
// fresh memtable and iterating over it in sorted key order.
jbyteArray Java_org_rocksdb_WriteBatchTest_getContents(
    JNIEnv* env, jclass jclazz, jobject jobj) {
  rocksdb::WriteBatch* b = rocksdb::WriteBatchJni::getHandle(env, jobj);
  assert(b != nullptr);

  // todo: Currently the following code is directly copied from
  // db/write_bench_test.cc. It could be implemented in java once
  // all the necessary components can be accessed via jni api.

  // Build a throwaway skiplist memtable and insert the batch into it.
  rocksdb::InternalKeyComparator cmp(rocksdb::BytewiseComparator());
  auto factory = std::make_shared<rocksdb::SkipListFactory>();
  rocksdb::Options options;
  options.memtable_factory = factory;
  rocksdb::MemTable* mem = new rocksdb::MemTable(cmp, options);
  mem->Ref();  // memtables are refcounted; released via Unref() below
  std::string state;
  rocksdb::Status s = rocksdb::WriteBatchInternal::InsertInto(b, mem, &options);
  int count = 0;
  rocksdb::Iterator* iter = mem->NewIterator();
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    rocksdb::ParsedInternalKey ikey;
    // Zero-fill so no field is left uninitialized if parsing fails.
    memset(reinterpret_cast<void*>(&ikey), 0, sizeof(ikey));
    // ASSERT_TRUE comes from util/testharness.h (this is test-only code).
    ASSERT_TRUE(rocksdb::ParseInternalKey(iter->key(), &ikey));
    // Format one record per entry type; count only real records.
    switch (ikey.type) {
      case rocksdb::kTypeValue:
        state.append("Put(");
        state.append(ikey.user_key.ToString());
        state.append(", ");
        state.append(iter->value().ToString());
        state.append(")");
        count++;
        break;
      case rocksdb::kTypeMerge:
        state.append("Merge(");
        state.append(ikey.user_key.ToString());
        state.append(", ");
        state.append(iter->value().ToString());
        state.append(")");
        count++;
        break;
      case rocksdb::kTypeDeletion:
        state.append("Delete(");
        state.append(ikey.user_key.ToString());
        state.append(")");
        count++;
        break;
      default:
        assert(false);
        break;
    }
    // Each record is tagged with its sequence number.
    state.append("@");
    state.append(rocksdb::NumberToString(ikey.sequence));
  }
  delete iter;
  // Surface insertion failures / count mismatches in the returned string so
  // the Java-side golden comparison fails loudly.
  if (!s.ok()) {
    state.append(s.ToString());
  } else if (count != rocksdb::WriteBatchInternal::Count(b)) {
    state.append("CountMismatch()");
  }
  // Unref() drops the last reference; deleting its result frees the memtable
  // (matches the upstream write_batch_test.cc idiom).
  delete mem->Unref();

  // Copy the accumulated string into a fresh Java byte[].
  jbyteArray jstate = env->NewByteArray(state.size());
  env->SetByteArrayRegion(
      jstate, 0, state.size(),
      reinterpret_cast<const jbyte*>(state.c_str()));

  return jstate;
}
||||
|
@ -0,0 +1,71 @@ |
||||
#!/bin/bash
# Cross-version DB sanity check: builds db_sanity_test at two commits,
# creates a database with each build, and verifies each database with the
# *other* build to catch on-disk format regressions.
#
# Fixes: added the missing shebang (the script uses the bash-only `function`
# keyword) and quoted all variable expansions so paths survive word splitting.
TMP_DIR="/tmp/rocksdb-sanity-test"

if [ "$#" -lt 2 ]; then
  echo "usage: ./auto_sanity_test.sh [new_commit] [old_commit]"
  echo "Missing either [new_commit] or [old_commit], perform sanity check with the latest and 10th latest commits."
  recent_commits=`git log | grep -e "^commit [a-z0-9]\+$"| head -n10 | sed -e 's/commit //g'`
  commit_new=`echo "$recent_commits" | head -n1`
  commit_old=`echo "$recent_commits" | tail -n1`
  echo "the most recent commits are:"
  echo "$recent_commits"
else
  commit_new="$1"
  commit_old="$2"
fi

if [ ! -d "$TMP_DIR" ]; then
  mkdir "$TMP_DIR"
fi
dir_new="${TMP_DIR}/${commit_new}"
dir_old="${TMP_DIR}/${commit_old}"

# Builds db_sanity_test from the current tree; exits on build failure.
function makestuff() {
  echo "make clean"
  make clean > /dev/null
  echo "make db_sanity_test -j32"
  make db_sanity_test -j32 > /dev/null
  if [ $? -ne 0 ]; then
    echo "[ERROR] Failed to perform 'make db_sanity_test'"
    exit 1
  fi
}

rm -r -f "$dir_new"
rm -r -f "$dir_old"

echo "Running db sanity check with commits $commit_new and $commit_old."

echo "============================================================="
echo "Making build $commit_new"
makestuff
mv db_sanity_test new_db_sanity_test
echo "Creating db based on the new commit --- $commit_new"
./new_db_sanity_test "$dir_new" create

echo "============================================================="
echo "Making build $commit_old"
makestuff
mv db_sanity_test old_db_sanity_test
echo "Creating db based on the old commit --- $commit_old"
./old_db_sanity_test "$dir_old" create

echo "============================================================="
echo "Verifying new db $dir_new using the old commit --- $commit_old"
./old_db_sanity_test "$dir_new" verify
if [ $? -ne 0 ]; then
  echo "[ERROR] Verification of $dir_new using commit $commit_old failed."
  exit 2
fi

echo "============================================================="
echo "Verifying old db $dir_old using the new commit --- $commit_new"
./new_db_sanity_test "$dir_old" verify
if [ $? -ne 0 ]; then
  echo "[ERROR] Verification of $dir_old using commit $commit_new failed."
  exit 2
fi

rm old_db_sanity_test
rm new_db_sanity_test

echo "Auto sanity test passed!"
@ -0,0 +1,62 @@ |
||||
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
|
||||
#include "util/sync_point.h" |
||||
|
||||
namespace rocksdb { |
||||
|
||||
// Returns the process-wide singleton. The function-local static is
// initialized on first use (thread-safe under C++11 magic statics).
SyncPoint* SyncPoint::GetInstance() {
  static SyncPoint sync_point;
  return &sync_point;
}
||||
|
||||
void SyncPoint::LoadDependency(const std::vector<Dependency>& dependencies) { |
||||
successors_.clear(); |
||||
predecessors_.clear(); |
||||
cleared_points_.clear(); |
||||
for (const auto& dependency : dependencies) { |
||||
successors_[dependency.predecessor].push_back(dependency.successor); |
||||
predecessors_[dependency.successor].push_back(dependency.predecessor); |
||||
} |
||||
} |
||||
|
||||
bool SyncPoint::PredecessorsAllCleared(const std::string& point) { |
||||
for (const auto& pred : predecessors_[point]) { |
||||
if (cleared_points_.count(pred) == 0) { |
||||
return false; |
||||
} |
||||
} |
||||
return true; |
||||
} |
||||
|
||||
void SyncPoint::EnableProcessing() { |
||||
std::unique_lock<std::mutex> lock(mutex_); |
||||
enabled_ = true; |
||||
} |
||||
|
||||
void SyncPoint::DisableProcessing() { |
||||
std::unique_lock<std::mutex> lock(mutex_); |
||||
enabled_ = false; |
||||
} |
||||
|
||||
void SyncPoint::ClearTrace() { |
||||
std::unique_lock<std::mutex> lock(mutex_); |
||||
cleared_points_.clear(); |
||||
} |
||||
|
||||
void SyncPoint::Process(const std::string& point) { |
||||
std::unique_lock<std::mutex> lock(mutex_); |
||||
|
||||
if (!enabled_) return; |
||||
|
||||
while (!PredecessorsAllCleared(point)) { |
||||
cv_.wait(lock); |
||||
} |
||||
|
||||
cleared_points_.insert(point); |
||||
cv_.notify_all(); |
||||
} |
||||
|
||||
} // namespace rocksdb
|
@ -0,0 +1,79 @@ |
||||
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under the BSD-style license found in the
|
||||
// LICENSE file in the root directory of this source tree. An additional grant
|
||||
// of patent rights can be found in the PATENTS file in the same directory.
|
||||
#pragma once |
||||
|
||||
#include <condition_variable> |
||||
#include <mutex> |
||||
#include <string> |
||||
#include <unordered_set> |
||||
#include <unordered_map> |
||||
#include <vector> |
||||
|
||||
namespace rocksdb { |
||||
|
||||
// This class provides facility to reproduce race conditions deterministically
|
||||
// in unit tests.
|
||||
// Developer could specify sync points in the codebase via TEST_SYNC_POINT.
|
||||
// Each sync point represents a position in the execution stream of a thread.
|
||||
// In the unit test, 'Happens After' relationship among sync points could be
|
||||
// setup via SyncPoint::LoadDependency, to reproduce a desired interleave of
|
||||
// threads execution.
|
||||
// Refer to (DBTest,TransactionLogIteratorRace), for an example use case.
|
||||
|
||||
class SyncPoint {
 public:
  // Returns the process-wide singleton instance.
  static SyncPoint* GetInstance();

  // A happens-after edge: `successor` may only proceed once `predecessor`
  // has been processed.
  struct Dependency {
    std::string predecessor;
    std::string successor;
  };
  // call once at the beginning of a test to setup the dependency between
  // sync points
  void LoadDependency(const std::vector<Dependency>& dependencies);

  // enable sync point processing (disabled on startup)
  void EnableProcessing();

  // disable sync point processing
  void DisableProcessing();

  // remove the execution trace of all sync points
  void ClearTrace();

  // triggered by TEST_SYNC_POINT, blocking execution until all predecessors
  // are executed.
  void Process(const std::string& point);

  // TODO: it might be useful to provide a function that blocks until all
  // sync points are cleared.

 private:
  // Returns true iff every predecessor of `point` has been processed.
  // Caller must hold mutex_.
  bool PredecessorsAllCleared(const std::string& point);

  // successor/predecessor map loaded from LoadDependency
  std::unordered_map<std::string, std::vector<std::string>> successors_;
  std::unordered_map<std::string, std::vector<std::string>> predecessors_;

  // Protects cleared_points_ and enabled_; cv_ wakes threads blocked in
  // Process() when a point is cleared.
  std::mutex mutex_;
  std::condition_variable cv_;
  // sync points that have been passed through
  std::unordered_set<std::string> cleared_points_;
  bool enabled_ = false;
};
||||
|
||||
} // namespace rocksdb
|
||||
|
||||
// Use TEST_SYNC_POINT to specify sync points inside code base.
|
||||
// Sync points can have happens-after dependency on other sync points,
|
||||
// configured at runtime via SyncPoint::LoadDependency. This could be
|
||||
// utilized to re-produce race conditions between threads.
|
||||
// See TransactionLogIteratorRace in db_test.cc for an example use case.
|
||||
// TEST_SYNC_POINT is no op in release build.
|
||||
#ifdef NDEBUG |
||||
#define TEST_SYNC_POINT(x) |
||||
#else |
||||
#define TEST_SYNC_POINT(x) rocksdb::SyncPoint::GetInstance()->Process(x) |
||||
#endif |
Loading…
Reference in new issue