diff --git a/db/column_family.h b/db/column_family.h
index 4ba154779..25d5c2a15 100644
--- a/db/column_family.h
+++ b/db/column_family.h
@@ -229,7 +229,7 @@ class ColumnFamilyData {
   MemTable* mem() { return mem_; }
   Version* current() { return current_; }
   Version* dummy_versions() { return dummy_versions_; }
-  void SetCurrent(Version* current);
+  void SetCurrent(Version* _current);
   uint64_t GetNumLiveVersions() const;  // REQUIRE: DB mutex held
   uint64_t GetTotalSstFilesSize() const;  // REQUIRE: DB mutex held
   void SetMemtable(MemTable* new_mem) { mem_ = new_mem; }
@@ -531,7 +531,7 @@ class ColumnFamilyMemTablesImpl : public ColumnFamilyMemTables {
   // Cannot be called while another thread is calling Seek().
   // REQUIRES: use this function of DBImpl::column_family_memtables_ should be
   // under a DB mutex OR from a write thread
-  virtual ColumnFamilyData* current() { return current_; }
+  virtual ColumnFamilyData* current() override { return current_; }
 
  private:
   ColumnFamilySet* column_family_set_;
diff --git a/db/db_impl.cc b/db/db_impl.cc
index fb179e04b..020baa7e1 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -4387,7 +4387,8 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options,
       pg.leader = &w;
       pg.last_writer = last_writer;
       pg.early_exit_allowed = !need_log_sync;
-      pg.running.store(write_batch_group.size(), std::memory_order_relaxed);
+      pg.running.store(static_cast<uint32_t>(write_batch_group.size()),
+                       std::memory_order_relaxed);
       write_thread_.LaunchParallelFollowers(&pg, current_sequence);
 
       ColumnFamilyMemTablesImpl column_family_memtables(
diff --git a/db/write_thread.cc b/db/write_thread.cc
index a5285ce99..5e43937ee 100644
--- a/db/write_thread.cc
+++ b/db/write_thread.cc
@@ -420,7 +420,6 @@ void WriteThread::ExitAsBatchGroupLeader(Writer* leader, Writer* last_writer,
 
 void WriteThread::EnterUnbatched(Writer* w, InstrumentedMutex* mu) {
   static AdaptationContext ctx{"EnterUnbatched"};
-  static std::atomic<int32_t> adaptation_history{};
 
   assert(w->batch == nullptr);
   bool linked_as_leader;
diff --git a/util/concurrent_arena.cc b/util/concurrent_arena.cc
index 027124871..fae09d7d2 100644
--- a/util/concurrent_arena.cc
+++ b/util/concurrent_arena.cc
@@ -36,12 +36,13 @@ ConcurrentArena::Shard* ConcurrentArena::Repick() {
   int cpuid = port::PhysicalCoreID();
   if (UNLIKELY(cpuid < 0)) {
     // cpu id unavailable, just pick randomly
-    cpuid = Random::GetTLSInstance()->Uniform(index_mask_ + 1);
+    cpuid =
+        Random::GetTLSInstance()->Uniform(static_cast<int>(index_mask_) + 1);
   }
 #if ROCKSDB_SUPPORT_THREAD_LOCAL
   // even if we are cpu 0, use a non-zero tls_cpuid so we can tell we
   // have repicked
-  tls_cpuid = cpuid | (index_mask_ + 1);
+  tls_cpuid = cpuid | (static_cast<int>(index_mask_) + 1);
 #endif
   return &shards_[cpuid & index_mask_];
 }
diff --git a/util/concurrent_arena.h b/util/concurrent_arena.h
index e3e1a3eb3..912c67da3 100644
--- a/util/concurrent_arena.h
+++ b/util/concurrent_arena.h
@@ -78,7 +78,7 @@ class ConcurrentArena : public Allocator {
 
  private:
   struct Shard {
-    char padding[40];
+    char padding[40] __attribute__((__unused__));
     mutable SpinMutex mutex;
     char* free_begin_;
     std::atomic<size_t> allocated_and_unused_;
@@ -92,7 +92,7 @@ class ConcurrentArena : public Allocator {
   enum ZeroFirstEnum : uint32_t { tls_cpuid = 0 };
 #endif
 
-  char padding0[56];
+  char padding0[56] __attribute__((__unused__));
 
   size_t shard_block_size_;
 
@@ -106,7 +106,7 @@ class ConcurrentArena : public Allocator {
   std::atomic<size_t> memory_allocated_bytes_;
   std::atomic<size_t> irregular_block_num_;
 
-  char padding1[56];
+  char padding1[56] __attribute__((__unused__));
 
   Shard* Repick();
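
Note on the patch (context, not part of the diff): each change silences a specific compiler warning without altering behavior. Renaming SetCurrent's parameter avoids shadowing the current() member (-Wshadow), adding override satisfies clang's -Winconsistent-missing-override, and the explicit static_casts document the size_t narrowing that -Wconversion and MSVC would otherwise flag. A minimal standalone sketch of the same idioms, assuming nothing from RocksDB; Base, Derived, and all member names here are made up for illustration:

// Compile with, e.g.:
//   clang++ -std=c++11 -Wall -Wshadow -Wconversion warning_fixes.cc
#include <atomic>
#include <cstddef>
#include <cstdint>

class Base {
 public:
  virtual ~Base() {}
  virtual int current() { return 0; }
};

class Derived : public Base {
 public:
  // 'override' asks the compiler to verify this really redefines a virtual
  // from Base; clang's -Winconsistent-missing-override (the warning the
  // patch addresses) complains when overriders mix the keyword.
  virtual int current() override { return current_; }

  // Naming the parameter '_current' keeps it from shadowing the current()
  // member, which GCC's -Wshadow (and clang's -Wshadow-all) would flag.
  void SetCurrent(int _current) { current_ = _current; }

 private:
  int current_ = 0;
};

int main() {
  std::atomic<uint32_t> running;
  size_t group_size = 4;  // stands in for write_batch_group.size()
  // size_t -> uint32_t narrows on LP64 platforms; the explicit cast states
  // the intent and silences -Wconversion / MSVC C4267.
  running.store(static_cast<uint32_t>(group_size), std::memory_order_relaxed);
  Derived d;
  d.SetCurrent(static_cast<int>(running.load(std::memory_order_relaxed)));
  return d.current();
}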
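The __attribute__((__unused__)) additions keep ConcurrentArena's padding arrays from tripping clang's -Wunused-private-field. The padding itself exists so that each Shard's hot fields sit on their own cache line, avoiding false sharing between shards touched by different cores. A hypothetical sketch of that layout pattern; ShardedCounter and its members are illustrative, not RocksDB code, and the math assumes 64-byte cache lines with an 8-byte std::atomic<size_t>:

#include <atomic>
#include <cstddef>

class ShardedCounter {
 private:
  struct Shard {
    // Pad so each shard's counter starts on its own 64-byte cache line.
    // The array is never read, so clang's -Wunused-private-field would
    // warn without the attribute (a GCC/Clang extension; portable code
    // hides it behind a macro).
    char padding[56] __attribute__((__unused__));
    std::atomic<size_t> value{0};
  };
  static_assert(sizeof(Shard) >= 64, "shard should fill a cache line");

  static const size_t kNumShards = 8;  // power of two, so masking works
  Shard shards_[kNumShards];

 public:
  void Add(size_t tid, size_t n) {
    // kNumShards is a power of two, so the mask is equivalent to a modulo,
    // mirroring the 'cpuid & index_mask_' trick in ConcurrentArena.
    shards_[tid & (kNumShards - 1)].value.fetch_add(
        n, std::memory_order_relaxed);
  }

  size_t Total() const {
    size_t sum = 0;
    for (const Shard& s : shards_) {
      sum += s.value.load(std::memory_order_relaxed);
    }
    return sum;
  }
};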