Fix CLANG errors introduced by 7d87f02799

Summary: Fix some CLANG errors introduced in 7d87f02799

Test Plan: Build with both CLANG and gcc

Reviewers: rven, yhchiang, kradhakrishnan, anthony, IslamAbdelRahman, ngbronson

Subscribers: leveldb, dhruba

Differential Revision: https://reviews.facebook.net/D52329
main
sdong 9 years ago
parent 7fafd52dce
commit 11672df19a
  1. db/column_family.h — 4 lines changed
  2. db/db_impl.cc — 3 lines changed
  3. db/write_thread.cc — 1 line changed
  4. util/concurrent_arena.cc — 5 lines changed
  5. util/concurrent_arena.h — 6 lines changed

@ -229,7 +229,7 @@ class ColumnFamilyData {
MemTable* mem() { return mem_; }
Version* current() { return current_; }
Version* dummy_versions() { return dummy_versions_; }
void SetCurrent(Version* current);
void SetCurrent(Version* _current);
uint64_t GetNumLiveVersions() const; // REQUIRE: DB mutex held
uint64_t GetTotalSstFilesSize() const; // REQUIRE: DB mutex held
void SetMemtable(MemTable* new_mem) { mem_ = new_mem; }
@ -531,7 +531,7 @@ class ColumnFamilyMemTablesImpl : public ColumnFamilyMemTables {
// Cannot be called while another thread is calling Seek().
// REQUIRES: use this function of DBImpl::column_family_memtables_ should be
// under a DB mutex OR from a write thread
virtual ColumnFamilyData* current() { return current_; }
virtual ColumnFamilyData* current() override { return current_; }
private:
ColumnFamilySet* column_family_set_;

@ -4387,7 +4387,8 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options,
pg.leader = &w;
pg.last_writer = last_writer;
pg.early_exit_allowed = !need_log_sync;
pg.running.store(write_batch_group.size(), std::memory_order_relaxed);
pg.running.store(static_cast<uint32_t>(write_batch_group.size()),
std::memory_order_relaxed);
write_thread_.LaunchParallelFollowers(&pg, current_sequence);
ColumnFamilyMemTablesImpl column_family_memtables(

@ -420,7 +420,6 @@ void WriteThread::ExitAsBatchGroupLeader(Writer* leader, Writer* last_writer,
void WriteThread::EnterUnbatched(Writer* w, InstrumentedMutex* mu) {
static AdaptationContext ctx{"EnterUnbatched"};
static std::atomic<uint32_t> adaptation_history{};
assert(w->batch == nullptr);
bool linked_as_leader;

@ -36,12 +36,13 @@ ConcurrentArena::Shard* ConcurrentArena::Repick() {
int cpuid = port::PhysicalCoreID();
if (UNLIKELY(cpuid < 0)) {
// cpu id unavailable, just pick randomly
cpuid = Random::GetTLSInstance()->Uniform(index_mask_ + 1);
cpuid =
Random::GetTLSInstance()->Uniform(static_cast<int>(index_mask_) + 1);
}
#if ROCKSDB_SUPPORT_THREAD_LOCAL
// even if we are cpu 0, use a non-zero tls_cpuid so we can tell we
// have repicked
tls_cpuid = cpuid | (index_mask_ + 1);
tls_cpuid = cpuid | (static_cast<int>(index_mask_) + 1);
#endif
return &shards_[cpuid & index_mask_];
}

@ -78,7 +78,7 @@ class ConcurrentArena : public Allocator {
private:
struct Shard {
char padding[40];
char padding[40] __attribute__((__unused__));
mutable SpinMutex mutex;
char* free_begin_;
std::atomic<size_t> allocated_and_unused_;
@ -92,7 +92,7 @@ class ConcurrentArena : public Allocator {
enum ZeroFirstEnum : uint32_t { tls_cpuid = 0 };
#endif
char padding0[56];
char padding0[56] __attribute__((__unused__));
size_t shard_block_size_;
@ -106,7 +106,7 @@ class ConcurrentArena : public Allocator {
std::atomic<size_t> memory_allocated_bytes_;
std::atomic<size_t> irregular_block_num_;
char padding1[56];
char padding1[56] __attribute__((__unused__));
Shard* Repick();

Loading…
Cancel
Save