Suppress unused warnings

Summary:
- Use `__unused__` everywhere
- Suppress unused warnings in Release mode
    + This currently affects non-MSVC builds (e.g. mingw64).
Closes https://github.com/facebook/rocksdb/pull/3448

Differential Revision: D6885496

Pulled By: miasantreble

fbshipit-source-id: f2f6adacec940cc3851a9eee328fafbf61aad211
Author: Tamir Duberstein
Committed by: Facebook Github Bot
Commit: cd5092e168 (parent a247617e6f)
Files changed (lines changed in parentheses):

  1. db/column_family.cc (4)
  2. db/db_impl_compaction_flush.cc (2)
  3. db/version_set.cc (2)
  4. env/io_posix.cc (2)
  5. monitoring/thread_status_updater.cc (2)
  6. monitoring/thread_status_updater_debug.cc (2)
  7. port/win/env_win.cc (3)
  8. port/win/io_win.cc (8)
  9. tools/db_bench_tool.cc (20)
  10. tools/db_stress.cc (30)
  11. utilities/document/document_db.cc (2)
  12. utilities/document/json_document.cc (10)
  13. utilities/lua/rocks_lua_compaction_filter.cc (2)
  14. utilities/transactions/transaction_base.cc (2)
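
For context on why the spelling matters: GCC and Clang treat `__attribute__((unused))` and `__attribute__((__unused__))` identically, but the double-underscore form sits in the implementation-reserved namespace, so it cannot collide with a user- or platform-defined `unused` macro. The annotated variables are typically written once and then read only by `assert()`, which compiles away when `NDEBUG` is defined in Release builds; that is what triggers the warning in the first place. A minimal sketch of the pattern (hypothetical names, not code from this diff):

  #include <cassert>

  // Hypothetical helper whose result only matters to a debug-build check.
  static bool ReleaseResource() { return true; }

  void Sketch() {
    // The reserved __unused__ spelling cannot be broken by a stray
    // `#define unused ...` in user code or platform headers.
    bool ok __attribute__((__unused__));
    ok = ReleaseResource();
    assert(ok);  // expands to nothing under NDEBUG, leaving `ok` unread
  }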

db/column_family.cc

@@ -469,7 +469,7 @@ ColumnFamilyData::~ColumnFamilyData() {
   local_sv_.reset();
   super_version_->db_mutex->Lock();
-  bool is_last_reference __attribute__((unused));
+  bool is_last_reference __attribute__((__unused__));
   is_last_reference = super_version_->Unref();
   assert(is_last_reference);
   super_version_->Cleanup();
@@ -480,7 +480,7 @@ ColumnFamilyData::~ColumnFamilyData() {
   if (dummy_versions_ != nullptr) {
     // List must be empty
     assert(dummy_versions_->TEST_Next() == dummy_versions_);
-    bool deleted __attribute__((unused));
+    bool deleted __attribute__((__unused__));
     deleted = dummy_versions_->Unref();
     assert(deleted);
   }

db/db_impl_compaction_flush.cc

@@ -1695,7 +1695,7 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
     env_->Schedule(&DBImpl::BGWorkBottomCompaction, ca, Env::Priority::BOTTOM,
                    this, &DBImpl::UnscheduleCallback);
   } else {
-    int output_level __attribute__((unused));
+    int output_level __attribute__((__unused__));
     output_level = c->output_level();
     TEST_SYNC_POINT_CALLBACK("DBImpl::BackgroundCompaction:NonTrivial",
                              &output_level);

db/version_set.cc

@@ -1976,7 +1976,7 @@ void VersionStorageInfo::ExtendFileRangeOverlappingInterval(
 #endif
   *start_index = mid_index + 1;
   *end_index = mid_index;
-  int count __attribute__((unused));
+  int count __attribute__((__unused__));
   count = 0;
   // check backwards from 'mid' to lower indices

env/io_posix.cc

@@ -799,7 +799,7 @@ Status PosixWritableFile::Close() {
   // trim the extra space preallocated at the end of the file
   // NOTE(ljin): we probably don't want to surface failure as an IOError,
   // but it will be nice to log these errors.
-  int dummy __attribute__((unused));
+  int dummy __attribute__((__unused__));
   dummy = ftruncate(fd_, filesize_);
 #if defined(ROCKSDB_FALLOCATE_PRESENT) && !defined(TRAVIS)
   // in some file systems, ftruncate only trims trailing space if the
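
A note on this hunk: ftruncate() is commonly declared with `warn_unused_result` (glibc's `__wur`), so its return value is assigned to a local rather than discarded outright; the attribute on that local then keeps Release builds from warning that the variable is set but never read, since no assert consumes it here. A hedged sketch of the same two-step suppression, using a hypothetical function in place of the real ftruncate declaration:

  // Hypothetical stand-in for a libc call whose result must not be ignored.
  __attribute__((warn_unused_result)) static int TrimFile(int /*fd*/,
                                                          long /*size*/) {
    return 0;
  }

  void CloseSketch(int fd, long filesize) {
    // Assigning the result satisfies -Wunused-result; marking the local
    // __unused__ keeps -Wunused-but-set-variable quiet in Release builds.
    int rc __attribute__((__unused__));
    rc = TrimFile(fd, filesize);
  }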

monitoring/thread_status_updater.cc

@@ -252,7 +252,7 @@ void ThreadStatusUpdater::EraseColumnFamilyInfo(const void* cf_key) {
   ConstantColumnFamilyInfo& cf_info = cf_pair->second;
   auto db_pair = db_key_map_.find(cf_info.db_key);
   assert(db_pair != db_key_map_.end());
-  size_t result __attribute__((unused));
+  size_t result __attribute__((__unused__));
   result = db_pair->second.erase(cf_key);
   assert(result);
   cf_info_map_.erase(cf_pair);

monitoring/thread_status_updater_debug.cc

@@ -21,7 +21,7 @@ void ThreadStatusUpdater::TEST_VerifyColumnFamilyInfoMap(
   }
   for (auto* handle : handles) {
     auto* cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(handle)->cfd();
-    auto iter __attribute__((unused)) = cf_info_map_.find(cfd);
+    auto iter __attribute__((__unused__)) = cf_info_map_.find(cfd);
     if (check_exist) {
       assert(iter != cf_info_map_.end());
       assert(iter->second.cf_name == cfd->GetName());

port/win/env_win.cc

@@ -74,8 +74,7 @@ WinEnvIO::WinEnvIO(Env* hosted_env)
 {
   LARGE_INTEGER qpf;
-  // No init as the compiler complains about unused var
-  BOOL ret;
+  BOOL ret __attribute__((__unused__));
   ret = QueryPerformanceFrequency(&qpf);
   assert(ret == TRUE);
   perf_counter_frequency_ = qpf.QuadPart;

port/win/io_win.cc

@@ -192,8 +192,8 @@ WinMmapReadableFile::WinMmapReadableFile(const std::string& fileName,
       length_(length) {}
 WinMmapReadableFile::~WinMmapReadableFile() {
-  BOOL ret = ::UnmapViewOfFile(mapped_region_);
-  (void)ret;
+  BOOL ret __attribute__((__unused__));
+  ret = ::UnmapViewOfFile(mapped_region_);
   assert(ret);
   ret = ::CloseHandle(hMap_);
@@ -279,7 +279,7 @@ Status WinMmapFile::MapNewRegion() {
   if (hMap_ != NULL) {
     // Unmap the previous one
-    BOOL ret;
+    BOOL ret __attribute__((__unused__));
     ret = ::CloseHandle(hMap_);
     assert(ret);
     hMap_ = NULL;
@@ -1023,7 +1023,7 @@ Status WinDirectory::Fsync() { return Status::OK(); }
 /// WinFileLock
 WinFileLock::~WinFileLock() {
-  BOOL ret;
+  BOOL ret __attribute__((__unused__));
   ret = ::CloseHandle(hFile_);
   assert(ret);
 }
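
Because `__attribute__` is a GCC/Clang extension, the edits in these Windows files only take effect for non-MSVC toolchains such as mingw64, as the summary notes. The commit spells the attribute out at each site; purely as an illustration (not something this diff introduces), a project could hide the choice behind a portability macro along these lines, with `ROCKSDB_MAYBE_UNUSED` as a hypothetical name:

  #include <cassert>

  // Hypothetical portability wrapper; this commit does NOT add one, it writes
  // the attribute out at each use site.
  #if defined(__GNUC__) || defined(__clang__)
  #define ROCKSDB_MAYBE_UNUSED __attribute__((__unused__))
  #elif defined(__cplusplus) && __cplusplus >= 201703L
  #define ROCKSDB_MAYBE_UNUSED [[maybe_unused]]  // standard form, MSVC included
  #else
  #define ROCKSDB_MAYBE_UNUSED
  #endif

  static bool CloseHandleSketch() { return true; }  // hypothetical helper

  void Sketch() {
    // Placed before the declaration so every expansion is syntactically valid.
    ROCKSDB_MAYBE_UNUSED bool ok = CloseHandleSketch();
    assert(ok);  // compiled out under NDEBUG; the macro silences the warning
  }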

tools/db_bench_tool.cc

@@ -350,7 +350,7 @@ DEFINE_uint64(subcompactions, 1,
               "Maximum number of subcompactions to divide L0-L1 compactions "
               "into.");
 static const bool FLAGS_subcompactions_dummy
-    __attribute__((unused)) = RegisterFlagValidator(&FLAGS_subcompactions,
+    __attribute__((__unused__)) = RegisterFlagValidator(&FLAGS_subcompactions,
                                                     &ValidateUint32Range);
 DEFINE_int32(max_background_flushes,
@@ -766,7 +766,7 @@ static bool ValidateCompressionLevel(const char* flagname, int32_t value) {
   return true;
 }
-static const bool FLAGS_compression_level_dummy __attribute__((unused)) =
+static const bool FLAGS_compression_level_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_compression_level, &ValidateCompressionLevel);
 DEFINE_int32(min_level_to_compress, -1, "If non-negative, compression starts"
@@ -1037,31 +1037,31 @@ DEFINE_int32(skip_list_lookahead, 0, "Used with skip_list memtablerep; try "
 DEFINE_bool(report_file_operations, false, "if report number of file "
             "operations");
-static const bool FLAGS_soft_rate_limit_dummy __attribute__((unused)) =
+static const bool FLAGS_soft_rate_limit_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_soft_rate_limit, &ValidateRateLimit);
-static const bool FLAGS_hard_rate_limit_dummy __attribute__((unused)) =
+static const bool FLAGS_hard_rate_limit_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_hard_rate_limit, &ValidateRateLimit);
-static const bool FLAGS_prefix_size_dummy __attribute__((unused)) =
+static const bool FLAGS_prefix_size_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_prefix_size, &ValidatePrefixSize);
-static const bool FLAGS_key_size_dummy __attribute__((unused)) =
+static const bool FLAGS_key_size_dummy __attribute__((__unused__)) =
    RegisterFlagValidator(&FLAGS_key_size, &ValidateKeySize);
-static const bool FLAGS_cache_numshardbits_dummy __attribute__((unused)) =
+static const bool FLAGS_cache_numshardbits_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_cache_numshardbits,
                           &ValidateCacheNumshardbits);
-static const bool FLAGS_readwritepercent_dummy __attribute__((unused)) =
+static const bool FLAGS_readwritepercent_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_readwritepercent, &ValidateInt32Percent);
 DEFINE_int32(disable_seek_compaction, false,
              "Not used, left here for backwards compatibility");
-static const bool FLAGS_deletepercent_dummy __attribute__((unused)) =
+static const bool FLAGS_deletepercent_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_deletepercent, &ValidateInt32Percent);
-static const bool FLAGS_table_cache_numshardbits_dummy __attribute__((unused)) =
+static const bool FLAGS_table_cache_numshardbits_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_table_cache_numshardbits,
                           &ValidateTableCacheNumshardbits);
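
The `FLAGS_*_dummy` constants above exist only for their side effect: `RegisterFlagValidator` runs during static initialization and attaches a validator to the flag, so the boolean itself is never read and would trip `-Wunused-variable` (or clang's `-Wunused-const-variable`) without the attribute. A hedged, self-contained sketch of the idiom, with stand-in definitions where the real tools rely on gflags:

  #include <cstdint>
  #include <cstdio>

  // Stand-ins for a gflags-defined flag and the tools' registration helper.
  static int32_t FLAGS_readpercent = 10;

  static bool ValidateInt32Percent(const char* flagname, int32_t value) {
    if (value < 0 || value > 100) {
      std::fprintf(stderr, "Invalid value for --%s: %d\n", flagname, value);
      return false;
    }
    return true;
  }

  template <typename T, typename V>
  static bool RegisterFlagValidator(T* /*flag*/, V /*validator*/) {
    return true;  // the real helper wires the validator into gflags
  }

  // The constant exists only to force the call at static-initialization time;
  // __unused__ stops the compiler from complaining that it is never read.
  static const bool FLAGS_readpercent_dummy __attribute__((__unused__)) =
      RegisterFlagValidator(&FLAGS_readpercent, &ValidateInt32Percent);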

tools/db_stress.cc

@@ -90,7 +90,7 @@ static bool ValidateUint32Range(const char* flagname, uint64_t value) {
 }
 DEFINE_uint64(seed, 2341234, "Seed for PRNG");
-static const bool FLAGS_seed_dummy __attribute__((unused)) =
+static const bool FLAGS_seed_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_seed, &ValidateUint32Range);
 DEFINE_int64(max_key, 1 * KB* KB,
@@ -270,7 +270,7 @@ DEFINE_bool(allow_concurrent_memtable_write, false,
 DEFINE_bool(enable_write_thread_adaptive_yield, true,
             "Use a yielding spin loop for brief writer thread waits.");
-static const bool FLAGS_subcompactions_dummy __attribute__((unused)) =
+static const bool FLAGS_subcompactions_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_subcompactions, &ValidateUint32Range);
 static bool ValidateInt32Positive(const char* flagname, int32_t value) {
@@ -282,7 +282,7 @@ static bool ValidateInt32Positive(const char* flagname, int32_t value) {
   return true;
 }
 DEFINE_int32(reopen, 10, "Number of times database reopens");
-static const bool FLAGS_reopen_dummy __attribute__((unused)) =
+static const bool FLAGS_reopen_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_reopen, &ValidateInt32Positive);
 DEFINE_int32(bloom_bits, 10, "Bloom filter bits per key. "
@@ -320,7 +320,7 @@ DEFINE_bool(use_fsync, false, "If true, issue fsync instead of fdatasync");
 DEFINE_int32(kill_random_test, 0,
             "If non-zero, kill at various points in source code with "
             "probability 1/this");
-static const bool FLAGS_kill_random_test_dummy __attribute__((unused)) =
+static const bool FLAGS_kill_random_test_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_kill_random_test, &ValidateInt32Positive);
 extern int rocksdb_kill_odds;
@@ -380,29 +380,29 @@ static bool ValidateInt32Percent(const char* flagname, int32_t value) {
 DEFINE_int32(readpercent, 10,
             "Ratio of reads to total workload (expressed as a percentage)");
-static const bool FLAGS_readpercent_dummy __attribute__((unused)) =
+static const bool FLAGS_readpercent_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_readpercent, &ValidateInt32Percent);
 DEFINE_int32(prefixpercent, 20,
             "Ratio of prefix iterators to total workload (expressed as a"
             " percentage)");
-static const bool FLAGS_prefixpercent_dummy __attribute__((unused)) =
+static const bool FLAGS_prefixpercent_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_prefixpercent, &ValidateInt32Percent);
 DEFINE_int32(writepercent, 45,
             "Ratio of writes to total workload (expressed as a percentage)");
-static const bool FLAGS_writepercent_dummy __attribute__((unused)) =
+static const bool FLAGS_writepercent_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_writepercent, &ValidateInt32Percent);
 DEFINE_int32(delpercent, 15,
             "Ratio of deletes to total workload (expressed as a percentage)");
-static const bool FLAGS_delpercent_dummy __attribute__((unused)) =
+static const bool FLAGS_delpercent_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_delpercent, &ValidateInt32Percent);
 DEFINE_int32(delrangepercent, 0,
             "Ratio of range deletions to total workload (expressed as a "
             "percentage). Cannot be used with test_batches_snapshots");
-static const bool FLAGS_delrangepercent_dummy __attribute__((unused)) =
+static const bool FLAGS_delrangepercent_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_delrangepercent, &ValidateInt32Percent);
 DEFINE_int32(nooverwritepercent, 60,
@@ -413,11 +413,11 @@ static const bool FLAGS_nooverwritepercent_dummy __attribute__((__unused__)) =
 DEFINE_int32(iterpercent, 10, "Ratio of iterations to total workload"
             " (expressed as a percentage)");
-static const bool FLAGS_iterpercent_dummy __attribute__((unused)) =
+static const bool FLAGS_iterpercent_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_iterpercent, &ValidateInt32Percent);
 DEFINE_uint64(num_iterations, 10, "Number of iterations per MultiIterate run");
-static const bool FLAGS_num_iterations_dummy __attribute__((unused)) =
+static const bool FLAGS_num_iterations_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_num_iterations, &ValidateUint32Range);
 namespace {
@@ -494,11 +494,11 @@ DEFINE_string(hdfs, "", "Name of hdfs environment");
 static rocksdb::Env* FLAGS_env = rocksdb::Env::Default();
 DEFINE_uint64(ops_per_thread, 1200000, "Number of operations per thread.");
-static const bool FLAGS_ops_per_thread_dummy __attribute__((unused)) =
+static const bool FLAGS_ops_per_thread_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_ops_per_thread, &ValidateUint32Range);
 DEFINE_uint64(log2_keys_per_lock, 2, "Log2 of number of keys per lock");
-static const bool FLAGS_log2_keys_per_lock_dummy __attribute__((unused)) =
+static const bool FLAGS_log2_keys_per_lock_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_log2_keys_per_lock, &ValidateUint32Range);
 DEFINE_bool(in_place_update, false, "On true, does inplace update in memtable");
@@ -537,7 +537,7 @@ static bool ValidatePrefixSize(const char* flagname, int32_t value) {
   return true;
 }
 DEFINE_int32(prefix_size, 7, "Control the prefix size for HashSkipListRep");
-static const bool FLAGS_prefix_size_dummy __attribute__((unused)) =
+static const bool FLAGS_prefix_size_dummy __attribute__((__unused__)) =
     RegisterFlagValidator(&FLAGS_prefix_size, &ValidatePrefixSize);
 DEFINE_bool(use_merge, false, "On true, replaces all writes with a Merge "
@@ -1789,7 +1789,7 @@ class StressTest {
                  cf, new_name.c_str());
       }
       thread->shared->LockColumnFamily(cf);
-      Status s __attribute__((unused));
+      Status s __attribute__((__unused__));
       s = db_->DropColumnFamily(column_families_[cf]);
       delete column_families_[cf];
       if (!s.ok()) {

utilities/document/document_db.cc

@@ -922,7 +922,7 @@ class DocumentDBImpl : public DocumentDB {
     for (const auto& update : updates.Items()) {
       if (update.first == "$set") {
         JSONDocumentBuilder builder;
-        bool res __attribute__((unused)) = builder.WriteStartObject();
+        bool res __attribute__((__unused__)) = builder.WriteStartObject();
         assert(res);
         for (const auto& itr : update.second.Items()) {
           if (itr.first == kPrimaryKey) {

utilities/document/json_document.cc

@@ -46,9 +46,9 @@ void InitJSONDocument(std::unique_ptr<char[]>* data,
                       Func f) {
   // TODO(stash): maybe add function to FbsonDocument to avoid creating array?
   fbson::FbsonWriter writer;
-  bool res __attribute__((unused)) = writer.writeStartArray();
+  bool res __attribute__((__unused__)) = writer.writeStartArray();
   assert(res);
-  uint32_t bytesWritten __attribute__((unused));
+  uint32_t bytesWritten __attribute__((__unused__));
   bytesWritten = f(writer);
   assert(bytesWritten != 0);
   res = writer.writeEndArray();
@@ -68,7 +68,7 @@ void InitString(std::unique_ptr<char[]>* data,
                 const std::string& s) {
   InitJSONDocument(data, value, std::bind(
       [](fbson::FbsonWriter& writer, const std::string& str) -> uint32_t {
-        bool res __attribute__((unused)) = writer.writeStartString();
+        bool res __attribute__((__unused__)) = writer.writeStartString();
         assert(res);
         auto bytesWritten = writer.writeString(str.c_str(),
                                                static_cast<uint32_t>(str.length()));
@@ -114,7 +114,7 @@ bool IsComparable(fbson::FbsonValue* left, fbson::FbsonValue* right) {
 void CreateArray(std::unique_ptr<char[]>* data, fbson::FbsonValue** value) {
   fbson::FbsonWriter writer;
-  bool res __attribute__((unused)) = writer.writeStartArray();
+  bool res __attribute__((__unused__)) = writer.writeStartArray();
   assert(res);
   res = writer.writeEndArray();
   assert(res);
@@ -127,7 +127,7 @@ void CreateArray(std::unique_ptr<char[]>* data, fbson::FbsonValue** value) {
 void CreateObject(std::unique_ptr<char[]>* data, fbson::FbsonValue** value) {
   fbson::FbsonWriter writer;
-  bool res __attribute__((unused)) = writer.writeStartObject();
+  bool res __attribute__((__unused__)) = writer.writeStartObject();
   assert(res);
   res = writer.writeEndObject();
   assert(res);

utilities/lua/rocks_lua_compaction_filter.cc

@@ -158,7 +158,7 @@ const char* RocksLuaCompactionFilter::Name() const {
         "return value is not a string while string is expected");
   } else {
     const char* name_buf = lua_tostring(lua_state, -1);
-    const size_t name_size __attribute__((unused)) = lua_strlen(lua_state, -1);
+    const size_t name_size __attribute__((__unused__)) = lua_strlen(lua_state, -1);
     assert(name_buf[name_size] == '\0');
     assert(strlen(name_buf) <= name_size);
     name_ = name_buf;

utilities/transactions/transaction_base.cc

@@ -623,7 +623,7 @@ void TransactionBaseImpl::UndoGetForUpdate(ColumnFamilyHandle* column_family,
   auto& cf_tracked_keys = tracked_keys_[column_family_id];
   std::string key_str = key.ToString();
   bool can_decrement = false;
-  bool can_unlock __attribute__((unused)) = false;
+  bool can_unlock __attribute__((__unused__)) = false;
   if (save_points_ != nullptr && !save_points_->empty()) {
     // Check if this key was fetched ForUpdate in this SavePoint
