Merge pull request #228 from miguelportilla/develop

Changes to support a unity build (the whole library compiled as one concatenated translation unit):

Script for generating the unity.cc file via the Makefile
A unity executable Makefile target for testing such builds
Source code changes to fix compilation of the unity build
main · Siying Dong · 10 years ago · commit 18efdba8d5
Changed files (19; lines changed in parentheses):
  1. .gitignore (1)
  2. Makefile (8)
  3. build_tools/unity (78)
  4. db/column_family.cc (9)
  5. db/compaction.cc (2)
  6. db/compaction.h (3)
  7. db/compaction_picker.cc (16)
  8. db/compaction_picker.h (3)
  9. db/db_impl.cc (9)
  10. db/db_impl.h (7)
  11. db/version_set.cc (28)
  12. table/block_based_table_builder.cc (3)
  13. table/block_prefix_index.cc (7)
  14. table/merger.cc (16)
  15. util/bloom.cc (3)
  16. util/dynamic_bloom.cc (3)
  17. util/hash.h (4)
  18. util/options_builder.cc (6)
  19. utilities/geodb/geodb_impl.cc (4)

.gitignore (vendored)

@@ -32,3 +32,4 @@ coverage/COVERAGE_REPORT
 tags
 java/*.log
 java/include/org_rocksdb_*.h
+unity.cc

Makefile

@@ -234,8 +234,14 @@ valgrind_check: all $(PROGRAMS) $(TESTS)
 		echo $$t $$((etime - stime)) >> $(VALGRIND_DIR)/valgrind_tests_times; \
 	done

+unity.cc:
+	$(shell (export ROCKSDB_ROOT="$(CURDIR)"; "$(CURDIR)/build_tools/unity" "$(CURDIR)/unity.cc"))
+
+unity: unity.cc unity.o
+	$(CXX) unity.o $(EXEC_LDFLAGS) -o $@ $(LDFLAGS) $(COVERAGEFLAGS)
+
 clean:
-	-rm -f $(PROGRAMS) $(TESTS) $(LIBRARY) $(SHARED) $(MEMENVLIBRARY) build_config.mk
+	-rm -f $(PROGRAMS) $(TESTS) $(LIBRARY) $(SHARED) $(MEMENVLIBRARY) build_config.mk unity.cc
 	-rm -rf ios-x86/* ios-arm/*
 	-find . -name "*.[od]" -exec rm {} \;
 	-find . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" -exec rm {} \;
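In other words: make unity regenerates unity.cc through build_tools/unity, compiles it into unity.o via the Makefile's usual compile rule, and links that single object into a unity smoke-test binary; clean now removes the generated unity.cc as well.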

build_tools/unity (new file)

@@ -0,0 +1,78 @@
#!/bin/sh
#
# Create the unity file
#

OUTPUT=$1
if test -z "$OUTPUT"; then
  echo "usage: $0 <output-filename>" >&2
  exit 1
fi

# Delete existing file, if it exists
rm -f "$OUTPUT"
touch "$OUTPUT"

# Detect OS
if test -z "$TARGET_OS"; then
  TARGET_OS=`uname -s`
fi

# generic port files (working on all platforms by #ifdef) go directly in /port
GENERIC_PORT_FILES=`cd "$ROCKSDB_ROOT"; find port -name '*.cc' | tr "\n" " "`

# On GCC, we pick libc's memcmp over GCC's memcmp via -fno-builtin-memcmp
case "$TARGET_OS" in
  Darwin)
    # PORT_FILES=port/darwin/darwin_specific.cc
    ;;
  IOS)
    ;;
  Linux)
    # PORT_FILES=port/linux/linux_specific.cc
    ;;
  SunOS)
    # PORT_FILES=port/sunos/sunos_specific.cc
    ;;
  FreeBSD)
    # PORT_FILES=port/freebsd/freebsd_specific.cc
    ;;
  NetBSD)
    # PORT_FILES=port/netbsd/netbsd_specific.cc
    ;;
  OpenBSD)
    # PORT_FILES=port/openbsd/openbsd_specific.cc
    ;;
  DragonFly)
    # PORT_FILES=port/dragonfly/dragonfly_specific.cc
    ;;
  OS_ANDROID_CROSSCOMPILE)
    # PORT_FILES=port/android/android.cc
    ;;
  *)
    echo "Unknown platform!" >&2
    exit 1
esac

# We want to make a list of all cc files within util, db, table, and helpers
# except for the test and benchmark files. By default, find will output a list
# of all files matching either rule, so we need to append -print to make the
# prune take effect.
DIRS="util db table utilities"

set -f # temporarily disable globbing so that our patterns aren't expanded
PRUNE_TEST="-name *test*.cc -prune"
PRUNE_BENCH="-name *bench*.cc -prune"
PORTABLE_FILES=`cd "$ROCKSDB_ROOT"; find $DIRS $PRUNE_TEST -o $PRUNE_BENCH -o -name '*.cc' -print | sort`
PORTABLE_CPP=`cd "$ROCKSDB_ROOT"; find $DIRS $PRUNE_TEST -o $PRUNE_BENCH -o -name '*.cpp' -print | sort`
set +f # re-enable globbing

# The sources consist of the portable files, plus the platform-specific port
# file.
for SOURCE_FILE in $PORTABLE_FILES $GENERIC_PORT_FILES $PORT_FILES $PORTABLE_CPP
do
  echo "#include <$SOURCE_FILE>" >> "$OUTPUT"
done

echo "int main(int argc, char** argv){ return 0; }" >> "$OUTPUT"

db/column_family.cc

@@ -49,15 +49,6 @@ ColumnFamilyHandleImpl::~ColumnFamilyHandleImpl() {
 uint32_t ColumnFamilyHandleImpl::GetID() const { return cfd()->GetID(); }

-namespace {
-// Fix user-supplied options to be reasonable
-template <class T, class V>
-static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
-  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
-  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
-}
-}  // anonymous namespace

 ColumnFamilyOptions SanitizeOptions(const InternalKeyComparator* icmp,
                                     const InternalFilterPolicy* ipolicy,
                                     const ColumnFamilyOptions& src) {

db/compaction.cc

@@ -18,7 +18,7 @@
 namespace rocksdb {

-static uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
+uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
   uint64_t sum = 0;
   for (size_t i = 0; i < files.size() && files[i]; i++) {
     sum += files[i]->fd.GetFileSize();

db/compaction.h

@@ -231,4 +231,7 @@ class Compaction {
   void ResetNextCompactionIndex();
 };

+// Utility function
+extern uint64_t TotalFileSize(const std::vector<FileMetaData*>& files);
+
 }  // namespace rocksdb
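Making TotalFileSize non-static and declaring it extern here is the standard cure for duplicated file-local helpers: in a unity build every source file lands in one translation unit, so a second static definition of the same function (version_set.cc had its own copy, removed further down) becomes a redefinition error. A minimal sketch of the pattern, with hypothetical names (total.h and total.cc are not files in this patch):

// total.h - declare once, in a header everyone can include
#include <cstdint>
#include <vector>
extern uint64_t TotalOf(const std::vector<uint64_t>& sizes);

// total.cc - define exactly once, with external linkage, so the unity
// translation unit (and the regular build) both see a single definition
#include "total.h"
uint64_t TotalOf(const std::vector<uint64_t>& sizes) {
  uint64_t sum = 0;
  for (uint64_t s : sizes) sum += s;
  return sum;
}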

db/compaction_picker.cc

@@ -18,6 +18,14 @@
 namespace rocksdb {

+uint64_t TotalCompensatedFileSize(const std::vector<FileMetaData*>& files) {
+  uint64_t sum = 0;
+  for (size_t i = 0; i < files.size() && files[i]; i++) {
+    sum += files[i]->compensated_file_size;
+  }
+  return sum;
+}
+
 namespace {
 // Determine compression type, based on user options, level of the output
 // file and whether compression is disabled.

@@ -45,14 +53,6 @@ CompressionType GetCompressionType(const Options& options, int level,
   }
 }

-uint64_t TotalCompensatedFileSize(const std::vector<FileMetaData*>& files) {
-  uint64_t sum = 0;
-  for (size_t i = 0; i < files.size() && files[i]; i++) {
-    sum += files[i]->compensated_file_size;
-  }
-  return sum;
-}
-
 // Multiply two operands. If they overflow, return op1.
 uint64_t MultiplyCheckOverflow(uint64_t op1, int op2) {
   if (op1 == 0) {

db/compaction_picker.h

@@ -204,4 +204,7 @@ class FIFOCompactionPicker : public CompactionPicker {
   }
 };

+// Utility function
+extern uint64_t TotalCompensatedFileSize(const std::vector<FileMetaData*>& files);
+
 }  // namespace rocksdb

db/db_impl.cc

@@ -231,15 +231,6 @@ struct DBImpl::CompactionState {
   }
 };

-namespace {
-// Fix user-supplied options to be reasonable
-template <class T, class V>
-static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
-  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
-  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
-}
-}  // anonymous namespace

 Options SanitizeOptions(const std::string& dbname,
                         const InternalKeyComparator* icmp,
                         const InternalFilterPolicy* ipolicy,

db/db_impl.h

@@ -647,4 +647,11 @@ extern Options SanitizeOptions(const std::string& db,
                                const Options& src);
 extern DBOptions SanitizeOptions(const std::string& db, const DBOptions& src);

+// Fix user-supplied options to be reasonable
+template <class T, class V>
+static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
+  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
+  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
+}
+
 }  // namespace rocksdb
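Hoisting ClipToRange out of the two anonymous namespaces and into this header fixes the other classic unity-build failure: the anonymous namespaces of all concatenated files merge into a single one, so the identical helper in column_family.cc and db_impl.cc collides with itself. A minimal sketch of the failure mode (a.cc and b.cc are hypothetical, not from this patch):

// a.cc
namespace {
int Clamp(int v) { return v < 0 ? 0 : v; }
}

// b.cc - fine when compiled separately; each TU gets its own private Clamp
namespace {
int Clamp(int v) { return v < 0 ? 0 : v; }
}

// unity.cc
#include "a.cc"
#include "b.cc"  // error: redefinition of 'Clamp' - within one translation
                 // unit, both anonymous namespace blocks are the same namespace

Keeping a single definition in db_impl.h means both former callers share one copy, and because it is a template defined in a header it remains safe in the regular multi-file build as well.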

db/version_set.cc

@@ -301,23 +301,6 @@ class FilePicker {
 };
 }  // anonymous namespace

-static uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
-  uint64_t sum = 0;
-  for (size_t i = 0; i < files.size() && files[i]; i++) {
-    sum += files[i]->fd.GetFileSize();
-  }
-  return sum;
-}
-
-static uint64_t TotalCompensatedFileSize(
-    const std::vector<FileMetaData*>& files) {
-  uint64_t sum = 0;
-  for (size_t i = 0; i < files.size() && files[i]; i++) {
-    sum += files[i]->compensated_file_size;
-  }
-  return sum;
-}

 Version::~Version() {
   assert(refs_ == 0);

@@ -666,7 +649,6 @@ void Version::AddIterators(const ReadOptions& read_options,
 }

 // Callback from TableCache::Get()
-namespace {
 enum SaverState {
   kNotFound,
   kFound,

@@ -674,6 +656,8 @@ enum SaverState {
   kCorrupt,
   kMerge  // saver contains the current merge result (the operands)
 };
+
+namespace version_set {
 struct Saver {
   SaverState state;
   const Comparator* ucmp;

@@ -686,7 +670,7 @@ struct Saver {
   Logger* logger;
   Statistics* statistics;
 };
-}
+}  // namespace version_set

 // Called from TableCache::Get and Table::Get when file/block in which
 // key may exist are not there in TableCache/BlockCache respectively. In this

@@ -694,7 +678,7 @@ struct Saver {
 // IO to be certain. Set the status=kFound and value_found=false to let the
 // caller know that key may exist but is not there in memory
 static void MarkKeyMayExist(void* arg) {
-  Saver* s = reinterpret_cast<Saver*>(arg);
+  version_set::Saver* s = reinterpret_cast<version_set::Saver*>(arg);
   s->state = kFound;
   if (s->value_found != nullptr) {
     *(s->value_found) = false;

@@ -703,7 +687,7 @@ static void MarkKeyMayExist(void* arg) {
 static bool SaveValue(void* arg, const ParsedInternalKey& parsed_key,
                       const Slice& v) {
-  Saver* s = reinterpret_cast<Saver*>(arg);
+  version_set::Saver* s = reinterpret_cast<version_set::Saver*>(arg);
   MergeContext* merge_contex = s->merge_context;
   std::string merge_result;  // temporary area for merge results later

@@ -817,7 +801,7 @@ void Version::Get(const ReadOptions& options,
   Slice user_key = k.user_key();
   assert(status->ok() || status->IsMergeInProgress());
-  Saver saver;
+  version_set::Saver saver;
   saver.state = status->ok()? kNotFound : kMerge;
   saver.ucmp = user_comparator_;
   saver.user_key = user_key;
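The version_set namespace introduced above is the counterpart technique for types: a struct named Saver in an anonymous namespace would, inside the unity translation unit, share one namespace with every other file's anonymous definitions, so any second Saver elsewhere in the tree becomes a redefinition. Wrapping each file's private types in a file-specific named namespace keeps them distinct. A sketch of the idiom (both file names and namespaces below are hypothetical):

// version_set.cc (pattern)
namespace version_set {
struct Saver { int state; };
}  // namespace version_set

// some_other_file.cc - its own Saver no longer clashes in a unity build
namespace some_other_file {
struct Saver { bool found; };
}  // namespace some_other_file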

table/block_based_table_builder.cc

@@ -45,7 +45,6 @@ namespace rocksdb {
 extern const std::string kHashIndexPrefixesBlock;
 extern const std::string kHashIndexPrefixesMetadataBlock;

-namespace {
 typedef BlockBasedTableOptions::IndexType IndexType;

@@ -335,8 +334,6 @@ Slice CompressBlock(const Slice& raw,
   return raw;
 }

-}  // anonymous namespace
-
 // kBlockBasedTableMagicNumber was picked by running
 // echo rocksdb.table.block_based | sha1sum
 // and taking the leading 64 bits.

table/block_prefix_index.cc

@@ -16,8 +16,6 @@
 namespace rocksdb {

-namespace {
-
 inline uint32_t Hash(const Slice& s) {
   return rocksdb::Hash(s.data(), s.size(), 0);
 }

@@ -26,8 +24,6 @@ inline uint32_t PrefixToBucket(const Slice& prefix, uint32_t num_buckets) {
   return Hash(prefix) % num_buckets;
 }

 // The prefix block index is simply a bucket array, with each entry pointing to
 // the blocks that span the prefixes hashed to this bucket.
 //

@@ -64,7 +60,6 @@ inline uint32_t EncodeIndex(uint32_t index) {
   return index | kBlockArrayMask;
 }

 // temporary storage for prefix information during index building
 struct PrefixRecord {
   Slice prefix;

@@ -74,8 +69,6 @@ struct PrefixRecord {
   PrefixRecord* next;
 };

-}  // anonymous namespace
-
 class BlockPrefixIndex::Builder {
  public:
  explicit Builder(const SliceTransform* internal_prefix_extractor)

table/merger.cc

@@ -23,7 +23,7 @@
 #include "util/autovector.h"

 namespace rocksdb {
-namespace {
+namespace merger {
 typedef std::priority_queue<
     IteratorWrapper*,
     std::vector<IteratorWrapper*>,

@@ -43,7 +43,7 @@ MaxIterHeap NewMaxIterHeap(const Comparator* comparator) {
 MinIterHeap NewMinIterHeap(const Comparator* comparator) {
   return MinIterHeap(MinIteratorComparator(comparator));
 }
-}  // namespace
+}  // namespace merger

 const size_t kNumIterReserve = 4;

@@ -56,8 +56,8 @@ class MergingIterator : public Iterator {
       current_(nullptr),
       use_heap_(true),
       direction_(kForward),
-      maxHeap_(NewMaxIterHeap(comparator_)),
-      minHeap_(NewMinIterHeap(comparator_)) {
+      maxHeap_(merger::NewMaxIterHeap(comparator_)),
+      minHeap_(merger::NewMinIterHeap(comparator_)) {
   children_.resize(n);
   for (int i = 0; i < n; i++) {
     children_[i].Set(children[i]);

@@ -274,8 +274,8 @@ class MergingIterator : public Iterator {
     kReverse
   };
   Direction direction_;
-  MaxIterHeap maxHeap_;
-  MinIterHeap minHeap_;
+  merger::MaxIterHeap maxHeap_;
+  merger::MinIterHeap minHeap_;
 };

 void MergingIterator::FindSmallest() {

@@ -302,8 +302,8 @@ void MergingIterator::FindLargest() {
 void MergingIterator::ClearHeaps() {
   use_heap_ = true;
-  maxHeap_ = NewMaxIterHeap(comparator_);
-  minHeap_ = NewMinIterHeap(comparator_);
+  maxHeap_ = merger::NewMaxIterHeap(comparator_);
+  minHeap_ = merger::NewMinIterHeap(comparator_);
 }

 Iterator* NewMergingIterator(const Comparator* cmp, Iterator** list, int n,

util/bloom.cc

@@ -15,9 +15,6 @@
 namespace rocksdb {

 namespace {
-static uint32_t BloomHash(const Slice& key) {
-  return Hash(key.data(), key.size(), 0xbc9f1d34);
-}

 class BloomFilterPolicy : public FilterPolicy {
  private:

util/dynamic_bloom.cc

@@ -14,9 +14,6 @@
 namespace rocksdb {

 namespace {
-static uint32_t BloomHash(const Slice& key) {
-  return Hash(key.data(), key.size(), 0xbc9f1d34);
-}

 uint32_t GetTotalBitsForLocality(uint32_t total_bits) {
   uint32_t num_blocks =

util/hash.h

@@ -17,6 +17,10 @@ namespace rocksdb {
 extern uint32_t Hash(const char* data, size_t n, uint32_t seed);

+inline uint32_t BloomHash(const Slice& key) {
+  return Hash(key.data(), key.size(), 0xbc9f1d34);
+}
+
 inline uint32_t GetSliceHash(const Slice& s) {
   return Hash(s.data(), s.size(), 397);
 }
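Replacing the two identical file-static BloomHash definitions in util/bloom.cc and util/dynamic_bloom.cc with this single inline function in util/hash.h removes the duplicate from the unity translation unit while staying link-safe in the normal build, since inline permits the definition to appear in every including file. A self-contained sketch of the idiom (TinyHash is a hypothetical stand-in, not RocksDB's hash):

#include <cstddef>
#include <cstdint>

// Header-style helper: 'inline' lets every .cc that includes this header
// carry the definition without violating the one-definition rule, and the
// include guard keeps it to a single definition inside a unity build.
inline uint32_t TinyHash(const char* data, size_t n, uint32_t seed) {
  uint32_t h = seed;
  for (size_t i = 0; i < n; i++) {
    h = (h * 16777619u) ^ static_cast<unsigned char>(data[i]);
  }
  return h;
}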

util/options_builder.cc

@@ -25,7 +25,7 @@ CompactionStyle PickCompactionStyle(size_t write_buffer_size,
   // Otherwise, calculate a score based on threshold and expected value of
   // two styles, weighing reads as 4X more important than writes.
   int expected_levels = static_cast<int>(ceil(
-      log(target_db_size / write_buffer_size) / log(kBytesForLevelMultiplier)));
+      ::log(target_db_size / write_buffer_size) / ::log(kBytesForLevelMultiplier)));
   int expected_max_files_universal =
       static_cast<int>(ceil(log2(target_db_size / write_buffer_size)));

@@ -111,8 +111,8 @@ void OptimizeForLevel(int read_amplification_threshold,
                       int write_amplification_threshold,
                       uint64_t target_db_size, Options* options) {
   int expected_levels_one_level0_file =
-      static_cast<int>(ceil(log(target_db_size / options->write_buffer_size) /
-                            log(kBytesForLevelMultiplier)));
+      static_cast<int>(ceil(::log(target_db_size / options->write_buffer_size) /
+                            ::log(kBytesForLevelMultiplier)));

   int level0_stop_writes_trigger =
       read_amplification_threshold - expected_levels_one_level0_file;
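The ::log qualification guards against name capture in the merged translation unit: once every file shares one scope, an unrelated function or variable named log declared earlier in unity.cc can hide the C math function from unqualified calls that follow. A compilable sketch of the capture (namespace demo and its log are hypothetical):

#include <math.h>
#include <stdio.h>

namespace demo {  // stands in for one of the concatenated source files
int log(int level) { return level; }  // an unrelated 'log'

double Score(double db_size, double buf_size) {
  // An unqualified log(db_size / buf_size) here would find demo::log first,
  // silently truncating the argument through int; '::' pins the call to the
  // global C math function instead.
  return ::log(db_size / buf_size) / ::log(10.0);
}
}  // namespace demo

int main() {
  printf("%f\n", demo::Score(1e9, 1e6));
  return 0;
}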

utilities/geodb/geodb_impl.cc

@@ -307,7 +307,7 @@ Status GeoDBImpl::searchQuadIds(const GeoPosition& position,
   // how many levels of detail to look for
   int numberOfTilesAtMaxDepth = floor((bottomRight.x - topLeft.x) / 256);
-  int zoomLevelsToRise = floor(log(numberOfTilesAtMaxDepth) / log(2));
+  int zoomLevelsToRise = floor(::log(numberOfTilesAtMaxDepth) / ::log(2));
   zoomLevelsToRise++;
   int levels = std::max(0, Detail - zoomLevelsToRise);

@@ -344,7 +344,7 @@ GeoDBImpl::Pixel GeoDBImpl::PositionToPixel(const GeoPosition& pos,
   double latitude = clip(pos.latitude, MinLatitude, MaxLatitude);
   double x = (pos.longitude + 180) / 360;
   double sinLatitude = sin(latitude * PI / 180);
-  double y = 0.5 - log((1 + sinLatitude) / (1 - sinLatitude)) / (4 * PI);
+  double y = 0.5 - ::log((1 + sinLatitude) / (1 - sinLatitude)) / (4 * PI);
   double mapSize = MapSize(levelOfDetail);
   double X = floor(clip(x * mapSize + 0.5, 0, mapSize - 1));
   double Y = floor(clip(y * mapSize + 0.5, 0, mapSize - 1));
