Clean up arena API

Summary:
The easy part goes first: this patch moves arena out of the public headers
and into the internal util/ directory. Building on that, a follow-up patch
will clean up memtable_rep.

Test Plan: make check

Reviewers: haobo, sdong, dhruba

CC: leveldb

Differential Revision: https://reviews.facebook.net/D15615
Branch: main
Author: kailiu (11 years ago)
Commit: 4e0298f23c (parent: ac92420fc5)
17 changed files (lines changed in parentheses):

 1. HISTORY.md (6)
 2. db/memtable.cc (12)
 3. db/memtable.h (4)
 4. db/plain_table_db_test.cc (7)
 5. db/simple_table_db_test.cc (1)
 6. db/skiplist.h (2)
 7. db/skiplist_test.cc (14)
 8. include/rocksdb/arena.h (45)
 9. include/rocksdb/memtablerep.h (6)
10. table/table_test.cc (1)
11. tools/sst_dump.cc (6)
12. util/arena.cc (21)
13. util/arena.h (26)
14. util/arena_test.cc (48)
15. util/hash_linklist_rep.cc (2)
16. util/hash_skiplist_rep.cc (2)
17. util/vectorrep.cc (2)
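
For downstream code the change is mechanical: the abstract Arena interface
from include/rocksdb/arena.h and its concrete ArenaImpl merge into a single
Arena class in the internal header util/arena.h. A minimal usage sketch of the
post-patch surface (the helper name and block size below are illustrative, not
from the patch):

#include "util/arena.h"  // replaces rocksdb/arena.h + util/arena_impl.h

void ArenaUsageSketch() {  // hypothetical helper, for illustration only
  // One concrete class now; the block size is clamped by OptimizeBlockSize().
  rocksdb::Arena arena(8192 /* block size in bytes */);

  // Small requests are carved out of the current block.
  char* raw = arena.Allocate(64);

  // Aligned requests round the bump pointer up to sizeof(void*).
  char* aligned = arena.AllocateAligned(128);

  // Accounting hooks, as used by MemTable::ApproximateMemoryUsage().
  size_t used = arena.ApproximateMemoryUsage();
  size_t allocated = arena.MemoryAllocatedBytes();

  (void)raw; (void)aligned; (void)used; (void)allocated;
  // All blocks are released together when `arena` goes out of scope.
}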

HISTORY.md
@@ -1,5 +1,11 @@
 # Rocksdb Change Log
 
+## 2.8.0 (01/28/2014)
+
+### Public API changes
+
+* Removed arena.h from public header files.
+
 ## 2.7.0 (01/28/2014)
 
 ### Public API changes

db/memtable.cc
@@ -17,6 +17,8 @@
 #include "rocksdb/env.h"
 #include "rocksdb/iterator.h"
 #include "rocksdb/merge_operator.h"
+#include "rocksdb/slice_transform.h"
+#include "util/arena.h"
 #include "util/coding.h"
 #include "util/murmurhash.h"
 #include "util/mutexlock.h"
@@ -38,9 +40,8 @@ namespace rocksdb {
 MemTable::MemTable(const InternalKeyComparator& cmp, const Options& options)
     : comparator_(cmp),
       refs_(0),
-      arena_impl_(options.arena_block_size),
-      table_(options.memtable_factory->CreateMemTableRep(comparator_,
-                                                         &arena_impl_)),
+      arena_(options.arena_block_size),
+      table_(options.memtable_factory->CreateMemTableRep(comparator_, &arena_)),
       flush_in_progress_(false),
       flush_completed_(false),
       file_number_(0),
@@ -61,8 +62,7 @@ MemTable::~MemTable() {
 }
 
 size_t MemTable::ApproximateMemoryUsage() {
-  return arena_impl_.ApproximateMemoryUsage() +
-         table_->ApproximateMemoryUsage();
+  return arena_.ApproximateMemoryUsage() + table_->ApproximateMemoryUsage();
 }
 
 int MemTable::KeyComparator::operator()(const char* prefix_len_key1,
@@ -184,7 +184,7 @@ void MemTable::Add(SequenceNumber s, ValueType type,
   const size_t encoded_len =
       VarintLength(internal_key_size) + internal_key_size +
       VarintLength(val_size) + val_size;
-  char* buf = arena_impl_.Allocate(encoded_len);
+  char* buf = arena_.Allocate(encoded_len);
   char* p = EncodeVarint32(buf, internal_key_size);
   memcpy(p, key.data(), key_size);
   p += key_size;

db/memtable.h
@@ -16,7 +16,7 @@
 #include "db/version_set.h"
 #include "rocksdb/db.h"
 #include "rocksdb/memtablerep.h"
-#include "util/arena_impl.h"
+#include "util/arena.h"
 #include "util/dynamic_bloom.h"
 
 namespace rocksdb {
@@ -161,7 +161,7 @@ class MemTable {
   KeyComparator comparator_;
   int refs_;
-  ArenaImpl arena_impl_;
+  Arena arena_;
   unique_ptr<MemTableRep> table_;
 
   // These are used to manage memtable flushes to storage

db/plain_table_db_test.cc
@@ -11,17 +11,18 @@
 #include <algorithm>
 #include <set>
-#include "rocksdb/db.h"
-#include "rocksdb/filter_policy.h"
+
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/version_set.h"
 #include "db/write_batch_internal.h"
 #include "rocksdb/cache.h"
 #include "rocksdb/compaction_filter.h"
+#include "rocksdb/db.h"
 #include "rocksdb/env.h"
-#include "rocksdb/table.h"
+#include "rocksdb/filter_policy.h"
 #include "rocksdb/plain_table_factory.h"
 #include "rocksdb/slice_transform.h"
+#include "rocksdb/table.h"
 #include "util/hash.h"
 #include "util/logging.h"
 #include "util/mutexlock.h"

db/simple_table_db_test.cc
@@ -31,6 +31,7 @@
 using std::unique_ptr;
 
+// IS THIS FILE STILL NEEDED?
 namespace rocksdb {
 
 // SimpleTable is a simple table format for UNIT TEST ONLY. It is not built

db/skiplist.h
@@ -34,8 +34,8 @@
 #include <assert.h>
 #include <stdlib.h>
 #include "port/port.h"
+#include "util/arena.h"
 #include "util/random.h"
-#include "rocksdb/arena.h"
 
 namespace rocksdb {

db/skiplist_test.cc
@@ -10,7 +10,7 @@
 #include "db/skiplist.h"
 #include <set>
 #include "rocksdb/env.h"
-#include "util/arena_impl.h"
+#include "util/arena.h"
 #include "util/hash.h"
 #include "util/random.h"
 #include "util/testharness.h"
@@ -34,9 +34,9 @@ struct TestComparator {
 class SkipTest { };
 
 TEST(SkipTest, Empty) {
-  ArenaImpl arena_impl;
+  Arena arena;
   TestComparator cmp;
-  SkipList<Key, TestComparator> list(cmp, &arena_impl);
+  SkipList<Key, TestComparator> list(cmp, &arena);
   ASSERT_TRUE(!list.Contains(10));
 
   SkipList<Key, TestComparator>::Iterator iter(&list);
@@ -54,9 +54,9 @@ TEST(SkipTest, InsertAndLookup) {
   const int R = 5000;
   Random rnd(1000);
   std::set<Key> keys;
-  ArenaImpl arena_impl;
+  Arena arena;
   TestComparator cmp;
-  SkipList<Key, TestComparator> list(cmp, &arena_impl);
+  SkipList<Key, TestComparator> list(cmp, &arena);
   for (int i = 0; i < N; i++) {
     Key key = rnd.Next() % R;
     if (keys.insert(key).second) {
@@ -209,14 +209,14 @@ class ConcurrentTest {
   // Current state of the test
   State current_;
 
-  ArenaImpl arena_impl_;
+  Arena arena_;
 
   // SkipList is not protected by mu_.  We just use a single writer
   // thread to modify it.
   SkipList<Key, TestComparator> list_;
 
  public:
-  ConcurrentTest() : list_(TestComparator(), &arena_impl_) { }
+  ConcurrentTest() : list_(TestComparator(), &arena_) {}
 
   // REQUIRES: External synchronization
   void WriteStep(Random* rnd) {

@ -1,45 +0,0 @@
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// Arena class defines memory allocation methods. It's used by memtable and
// skiplist.
#ifndef STORAGE_ROCKSDB_INCLUDE_ARENA_H_
#define STORAGE_ROCKSDB_INCLUDE_ARENA_H_
#include <limits>
#include <memory>
namespace rocksdb {
class Arena {
public:
Arena() {};
virtual ~Arena() {};
// Return a pointer to a newly allocated memory block of "bytes" bytes.
virtual char* Allocate(size_t bytes) = 0;
// Allocate memory with the normal alignment guarantees provided by malloc.
virtual char* AllocateAligned(size_t bytes) = 0;
// Returns an estimate of the total memory used by arena.
virtual const size_t ApproximateMemoryUsage() = 0;
// Returns the total number of bytes in all blocks allocated so far.
virtual const size_t MemoryAllocatedBytes() = 0;
private:
// No copying allowed
Arena(const Arena&);
void operator=(const Arena&);
};
} // namespace rocksdb
#endif // STORAGE_ROCKSDB_INCLUDE_ARENA_H_

include/rocksdb/memtablerep.h
@@ -33,11 +33,9 @@
 // iteration over the entire collection is rare since doing so requires all the
 // keys to be copied into a sorted data structure.
 
-#ifndef STORAGE_ROCKSDB_DB_MEMTABLEREP_H_
-#define STORAGE_ROCKSDB_DB_MEMTABLEREP_H_
-
+#pragma once
 #include <memory>
 
 #include "rocksdb/slice_transform.h"
 
 namespace rocksdb {
@@ -199,5 +197,3 @@ extern MemTableRepFactory* NewHashLinkListRepFactory(
     const SliceTransform* transform, size_t bucket_count = 50000);
 
 }
-
-#endif  // STORAGE_ROCKSDB_DB_MEMTABLEREP_H_

table/table_test.cc
@@ -23,6 +23,7 @@
 #include "rocksdb/plain_table_factory.h"
 #include "rocksdb/env.h"
 #include "rocksdb/iterator.h"
+#include "rocksdb/slice_transform.h"
 #include "rocksdb/memtablerep.h"
 #include "table/meta_blocks.h"
 #include "rocksdb/plain_table_factory.h"

tools/sst_dump.cc
@@ -291,9 +291,9 @@ int main(int argc, char** argv) {
           "Table Properties:\n"
           "------------------------------\n"
           "  %s", table_properties.ToString("\n  ", ": ").c_str());
-      fprintf(stdout, "# deleted keys: %lu\n",
-              rocksdb::GetDeletedKeys(
-                  table_properties.user_collected_properties));
+      fprintf(stdout, "# deleted keys: %zd\n",
+              rocksdb::GetDeletedKeys(
+                  table_properties.user_collected_properties));
     }
   }
 }

util/arena.cc
@@ -7,19 +7,19 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include "util/arena_impl.h"
+#include "util/arena.h"
 #include <algorithm>
 
 namespace rocksdb {
 
-const size_t ArenaImpl::kMinBlockSize = 4096;
-const size_t ArenaImpl::kMaxBlockSize = 2 << 30;
+const size_t Arena::kMinBlockSize = 4096;
+const size_t Arena::kMaxBlockSize = 2 << 30;
 static const int kAlignUnit = sizeof(void*);
 
 size_t OptimizeBlockSize(size_t block_size) {
   // Make sure block_size is in optimal range
-  block_size = std::max(ArenaImpl::kMinBlockSize, block_size);
-  block_size = std::min(ArenaImpl::kMaxBlockSize, block_size);
+  block_size = std::max(Arena::kMinBlockSize, block_size);
+  block_size = std::min(Arena::kMaxBlockSize, block_size);
 
   // make sure block_size is the multiple of kAlignUnit
   if (block_size % kAlignUnit != 0) {
@@ -29,19 +29,18 @@ size_t OptimizeBlockSize(size_t block_size) {
   return block_size;
 }
 
-ArenaImpl::ArenaImpl(size_t block_size)
-    : kBlockSize(OptimizeBlockSize(block_size)) {
+Arena::Arena(size_t block_size) : kBlockSize(OptimizeBlockSize(block_size)) {
   assert(kBlockSize >= kMinBlockSize && kBlockSize <= kMaxBlockSize &&
          kBlockSize % kAlignUnit == 0);
 }
 
-ArenaImpl::~ArenaImpl() {
+Arena::~Arena() {
   for (const auto& block : blocks_) {
     delete[] block;
   }
 }
 
-char* ArenaImpl::AllocateFallback(size_t bytes, bool aligned) {
+char* Arena::AllocateFallback(size_t bytes, bool aligned) {
   if (bytes > kBlockSize / 4) {
     // Object is more than a quarter of our block size.  Allocate it separately
     // to avoid wasting too much space in leftover bytes.
@@ -63,7 +62,7 @@ char* ArenaImpl::AllocateFallback(size_t bytes, bool aligned) {
   }
 }
 
-char* ArenaImpl::AllocateAligned(size_t bytes) {
+char* Arena::AllocateAligned(size_t bytes) {
   assert((kAlignUnit & (kAlignUnit - 1)) ==
          0);  // Pointer size should be a power of 2
   size_t current_mod =
@@ -83,7 +82,7 @@ char* ArenaImpl::AllocateAligned(size_t bytes) {
   return result;
 }
 
-char* ArenaImpl::AllocateNewBlock(size_t block_bytes) {
+char* Arena::AllocateNewBlock(size_t block_bytes) {
   char* block = new char[block_bytes];
   blocks_memory_ += block_bytes;
   blocks_.push_back(block);
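
The hunk above elides the body of AllocateAligned. For readers following the
alignment asserts, this is the standard bump-pointer rounding trick; a hedged
reconstruction under that assumption (a free-standing sketch, not the verbatim
RocksDB member function):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Sketch: round the bump pointer up to kAlignUnit (= sizeof(void*)) before
// handing out `bytes`. Returns nullptr when the current block cannot satisfy
// the request, where the real code falls back to AllocateFallback(bytes, true).
char* AllocateAlignedSketch(char*& alloc_ptr, size_t& alloc_bytes_remaining,
                            size_t bytes) {
  const size_t kAlignUnit = sizeof(void*);
  assert((kAlignUnit & (kAlignUnit - 1)) == 0);  // pointer size is a power of 2
  const size_t current_mod =
      reinterpret_cast<uintptr_t>(alloc_ptr) & (kAlignUnit - 1);
  const size_t slop = (current_mod == 0 ? 0 : kAlignUnit - current_mod);
  const size_t needed = bytes + slop;
  if (needed > alloc_bytes_remaining) {
    return nullptr;  // caller would take the fallback path instead
  }
  char* result = alloc_ptr + slop;  // first aligned address in the block
  alloc_ptr += needed;
  alloc_bytes_remaining -= needed;
  return result;
}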

util/arena.h
@@ -7,7 +7,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-// ArenaImpl is an implementation of Arena class. For a request of small size,
+// Arena is an implementation of Arena class. For a request of small size,
 // it allocates a block with pre-defined block size. For a request of big
 // size, it uses malloc to directly get the requested size.
@@ -16,37 +16,35 @@
 #include <vector>
 #include <assert.h>
 #include <stdint.h>
-#include "rocksdb/arena.h"
+#include "util/arena.h"
 
 namespace rocksdb {
 
-class ArenaImpl : public Arena {
+class Arena {
  public:
   // No copying allowed
-  ArenaImpl(const ArenaImpl&) = delete;
-  void operator=(const ArenaImpl&) = delete;
+  Arena(const Arena&) = delete;
+  void operator=(const Arena&) = delete;
 
   static const size_t kMinBlockSize;
   static const size_t kMaxBlockSize;
 
-  explicit ArenaImpl(size_t block_size = kMinBlockSize);
-  virtual ~ArenaImpl();
+  explicit Arena(size_t block_size = kMinBlockSize);
+  ~Arena();
 
-  virtual char* Allocate(size_t bytes) override;
+  char* Allocate(size_t bytes);
 
-  virtual char* AllocateAligned(size_t bytes) override;
+  char* AllocateAligned(size_t bytes);
 
   // Returns an estimate of the total memory usage of data allocated
   // by the arena (exclude the space allocated but not yet used for future
   // allocations).
-  virtual const size_t ApproximateMemoryUsage() {
+  const size_t ApproximateMemoryUsage() {
     return blocks_memory_ + blocks_.capacity() * sizeof(char*) -
            alloc_bytes_remaining_;
   }
 
-  virtual const size_t MemoryAllocatedBytes() override {
-    return blocks_memory_;
-  }
+  const size_t MemoryAllocatedBytes() { return blocks_memory_; }
 
  private:
   // Number of bytes allocated in one block
@@ -72,7 +70,7 @@ class ArenaImpl : public Arena {
   size_t blocks_memory_ = 0;
 };
 
-inline char* ArenaImpl::Allocate(size_t bytes) {
+inline char* Arena::Allocate(size_t bytes) {
   // The semantics of what to return are a bit messy if we allow
   // 0-byte allocations, so we disallow them here (we don't need
   // them for our internal use).
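
The estimate returned by ApproximateMemoryUsage above counts every allocated
block plus the footprint of the bookkeeping vector, minus the unused tail of
the current block. A small worked example (the numbers are illustrative,
assuming 64-bit pointers):

#include <cstddef>

// One live 4096-byte block, blocks_.capacity() == 4, and 1000 bytes of the
// current block still unused:
//   usage = 4096 + 4 * sizeof(char*) - 1000
constexpr size_t kApproximateUsageExample = 4096 + 4 * sizeof(char*) - 1000;

// With 8-byte pointers this comes to 4096 + 32 - 1000 = 3128 bytes.
static_assert(sizeof(char*) != 8 || kApproximateUsageExample == 3128,
              "worked example, assuming a 64-bit target");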

util/arena_test.cc
@@ -7,34 +7,32 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include "util/arena_impl.h"
+#include "util/arena.h"
 #include "util/random.h"
 #include "util/testharness.h"
 
 namespace rocksdb {
 
-class ArenaImplTest { };
+class ArenaTest {};
 
-TEST(ArenaImplTest, Empty) {
-  ArenaImpl arena0;
-}
+TEST(ArenaTest, Empty) { Arena arena0; }
 
-TEST(ArenaImplTest, MemoryAllocatedBytes) {
+TEST(ArenaTest, MemoryAllocatedBytes) {
   const int N = 17;
-  size_t req_sz;  //requested size
+  size_t req_sz;  // requested size
   size_t bsz = 8192;  // block size
   size_t expected_memory_allocated;
 
-  ArenaImpl arena_impl(bsz);
+  Arena arena(bsz);
 
   // requested size > quarter of a block:
   //   allocate requested size separately
   req_sz = 3001;
   for (int i = 0; i < N; i++) {
-    arena_impl.Allocate(req_sz);
+    arena.Allocate(req_sz);
   }
   expected_memory_allocated = req_sz * N;
-  ASSERT_EQ(arena_impl.MemoryAllocatedBytes(), expected_memory_allocated);
+  ASSERT_EQ(arena.MemoryAllocatedBytes(), expected_memory_allocated);
 
   // requested size < quarter of a block:
   //   allocate a block with the default size, then try to use unused part
@@ -42,28 +40,28 @@ TEST(ArenaImplTest, MemoryAllocatedBytes) {
   //   Allocate(99) call. All the remaining calls won't lead to new allocation.
   req_sz = 99;
   for (int i = 0; i < N; i++) {
-    arena_impl.Allocate(req_sz);
+    arena.Allocate(req_sz);
   }
   expected_memory_allocated += bsz;
-  ASSERT_EQ(arena_impl.MemoryAllocatedBytes(), expected_memory_allocated);
+  ASSERT_EQ(arena.MemoryAllocatedBytes(), expected_memory_allocated);
 
   // requested size > quarter of a block:
   //   allocate requested size separately
   req_sz = 99999999;
   for (int i = 0; i < N; i++) {
-    arena_impl.Allocate(req_sz);
+    arena.Allocate(req_sz);
   }
   expected_memory_allocated += req_sz * N;
-  ASSERT_EQ(arena_impl.MemoryAllocatedBytes(), expected_memory_allocated);
+  ASSERT_EQ(arena.MemoryAllocatedBytes(), expected_memory_allocated);
 }
 
 // Make sure we didn't count the allocate but not used memory space in
 // Arena::ApproximateMemoryUsage()
-TEST(ArenaImplTest, ApproximateMemoryUsageTest) {
+TEST(ArenaTest, ApproximateMemoryUsageTest) {
   const size_t kBlockSize = 4096;
   const size_t kEntrySize = kBlockSize / 8;
-  const size_t kZero = 0;
-  ArenaImpl arena(kBlockSize);
+  const size_t kZero = 0;
+  Arena arena(kBlockSize);
   ASSERT_EQ(kZero, arena.ApproximateMemoryUsage());
 
   auto num_blocks = kBlockSize / kEntrySize;
@@ -83,9 +81,9 @@ TEST(ArenaImplTest, ApproximateMemoryUsageTest) {
   ASSERT_GT(usage, mem_usage);
 }
 
-TEST(ArenaImplTest, Simple) {
+TEST(ArenaTest, Simple) {
   std::vector<std::pair<size_t, char*>> allocated;
-  ArenaImpl arena_impl;
+  Arena arena;
   const int N = 100000;
   size_t bytes = 0;
   Random rnd(301);
@@ -104,9 +102,9 @@ TEST(ArenaImplTest, Simple) {
     }
     char* r;
     if (rnd.OneIn(10)) {
-      r = arena_impl.AllocateAligned(s);
+      r = arena.AllocateAligned(s);
     } else {
-      r = arena_impl.Allocate(s);
+      r = arena.Allocate(s);
     }
 
     for (unsigned int b = 0; b < s; b++) {
@@ -115,9 +113,9 @@
     }
     bytes += s;
     allocated.push_back(std::make_pair(s, r));
-    ASSERT_GE(arena_impl.ApproximateMemoryUsage(), bytes);
+    ASSERT_GE(arena.ApproximateMemoryUsage(), bytes);
     if (i > N / 10) {
-      ASSERT_LE(arena_impl.ApproximateMemoryUsage(), bytes * 1.10);
+      ASSERT_LE(arena.ApproximateMemoryUsage(), bytes * 1.10);
     }
   }
 
   for (unsigned int i = 0; i < allocated.size(); i++) {
@@ -132,6 +130,4 @@ TEST(ArenaImplTest, Simple) {
 }  // namespace rocksdb
 
-int main(int argc, char** argv) {
-  return rocksdb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return rocksdb::test::RunAllTests(); }
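
For readers tracing the expected values in the MemoryAllocatedBytes test above:
with bsz = 8192, the quarter-block threshold from AllocateFallback is
8192 / 4 = 2048 bytes. A compile-time restatement of the test's arithmetic
(a sketch of the reasoning, not part of the patch):

// req_sz = 3001 exceeds a quarter block, so each of the N = 17 calls gets a
// dedicated allocation: MemoryAllocatedBytes() grows by 17 * 3001 bytes.
static_assert(3001 > 8192 / 4, "large requests are allocated separately");

// req_sz = 99 fits within a quarter block; the first call allocates one fresh
// 8192-byte block and all 17 requests are served from it (17 * 99 = 1683),
// so MemoryAllocatedBytes() grows by exactly one block: 8192 bytes.
static_assert(17 * 99 <= 8192, "small requests share a single block");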

util/hash_linklist_rep.cc
@@ -7,7 +7,7 @@
 #include "util/hash_linklist_rep.h"
 
 #include "rocksdb/memtablerep.h"
-#include "rocksdb/arena.h"
+#include "util/arena.h"
 #include "rocksdb/slice.h"
 #include "rocksdb/slice_transform.h"
 #include "port/port.h"

util/hash_skiplist_rep.cc
@@ -7,7 +7,7 @@
 #include "util/hash_skiplist_rep.h"
 
 #include "rocksdb/memtablerep.h"
-#include "rocksdb/arena.h"
+#include "util/arena.h"
 #include "rocksdb/slice.h"
 #include "rocksdb/slice_transform.h"
 #include "port/port.h"

util/vectorrep.cc
@@ -11,7 +11,7 @@
 #include <algorithm>
 #include <type_traits>
 
-#include "rocksdb/arena.h"
+#include "util/arena.h"
 #include "db/memtable.h"
 #include "port/port.h"
 #include "util/mutexlock.h"
