diff --git a/db/db_iter.cc b/db/db_iter.cc index db86ebc2c..815562c9f 100644 --- a/db/db_iter.cc +++ b/db/db_iter.cc @@ -287,7 +287,6 @@ void DBIter::MergeValuesNewToOld() { std::deque<std::string> operands; operands.push_front(iter_->value().ToString()); - std::string merge_result; // Temporary string to hold merge result later ParsedInternalKey ikey; for (iter_->Next(); iter_->Valid(); iter_->Next()) { if (!ParseKey(&ikey)) { diff --git a/db/db_test.cc b/db/db_test.cc index 7c2f051d0..d402a3578 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -4684,9 +4684,9 @@ TEST(DBTest, CompactionFilterContextManual) { ASSERT_EQ(NumTableFilesAtLevel(0), 1); // Verify total number of keys is correct after manual compaction. - int count = 0; - int total = 0; { + int count = 0; + int total = 0; Arena arena; ScopedArenaIterator iter(dbfull()->TEST_NewInternalIterator(&arena)); iter->SeekToFirst(); @@ -8205,7 +8205,6 @@ static void RandomTimeoutWriter(void* arg) { if (write_opt.timeout_hint_us == 0 || put_duration + kTimerBias < write_opt.timeout_hint_us) { ASSERT_OK(s); - std::string result; } if (s.IsTimedOut()) { timeout_count++; diff --git a/db/deletefile_test.cc b/db/deletefile_test.cc index a5af31284..f1cd4b040 100644 --- a/db/deletefile_test.cc +++ b/db/deletefile_test.cc @@ -148,7 +148,6 @@ class DeleteFileTest { TEST(DeleteFileTest, AddKeysAndQueryLevels) { CreateTwoLevels(); std::vector<LiveFileMetaData> metadata; - std::vector<std::string> keysinlevel; db_->GetLiveFilesMetaData(&metadata); std::string level1file = ""; diff --git a/db/version_set.cc b/db/version_set.cc index a092277fa..78241d1f0 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -1219,7 +1219,7 @@ bool Version::HasOverlappingUserKey( // Check the last file in inputs against the file after it size_t last_file = FindFile(cfd_->internal_comparator(), file_level, inputs->back()->largest.Encode()); - assert(0 <= last_file && last_file < kNumFiles); // File should exist! + assert(last_file < kNumFiles); // File should exist! 
if (last_file < kNumFiles-1) { // If not the last file const Slice last_key_in_input = ExtractUserKey( files[last_file].largest_key); @@ -1234,7 +1234,7 @@ bool Version::HasOverlappingUserKey( // Check the first file in inputs against the file just before it size_t first_file = FindFile(cfd_->internal_comparator(), file_level, inputs->front()->smallest.Encode()); - assert(0 <= first_file && first_file <= last_file); // File should exist! + assert(first_file <= last_file); // File should exist! if (first_file > 0) { // If not first file const Slice& first_key_in_input = ExtractUserKey( files[first_file].smallest_key); diff --git a/table/block_based_table_builder.cc b/table/block_based_table_builder.cc index 2f373fff1..9e4328cd4 100644 --- a/table/block_based_table_builder.cc +++ b/table/block_based_table_builder.cc @@ -721,7 +721,6 @@ Status BlockBasedTableBuilder::Finish() { // Write properties block. { PropertyBlockBuilder property_block_builder; - std::vector<std::string> failed_user_prop_collectors; r->props.filter_policy_name = r->table_options.filter_policy != nullptr ? 
r->table_options.filter_policy->Name() : ""; r->props.index_size = diff --git a/table/cuckoo_table_reader.cc b/table/cuckoo_table_reader.cc index f39900add..c0ca38bb7 100644 --- a/table/cuckoo_table_reader.cc +++ b/table/cuckoo_table_reader.cc @@ -193,7 +193,7 @@ class CuckooTableIterator : public Iterator { struct BucketComparator { BucketComparator(const Slice& file_data, const Comparator* ucomp, uint32_t bucket_len, uint32_t user_key_len, - const Slice target = Slice()) + const Slice& target = Slice()) : file_data_(file_data), ucomp_(ucomp), bucket_len_(bucket_len), diff --git a/table/format.cc b/table/format.cc index db11f9d4a..768e00165 100644 --- a/table/format.cc +++ b/table/format.cc @@ -334,9 +334,9 @@ Status UncompressBlockContents(const char* data, size_t n, case kZlibCompression: ubuf = std::unique_ptr<char[]>( port::Zlib_Uncompress(data, n, &decompress_size)); - static char zlib_corrupt_msg[] = - "Zlib not supported or corrupted Zlib compressed block contents"; if (!ubuf) { + static char zlib_corrupt_msg[] = + "Zlib not supported or corrupted Zlib compressed block contents"; return Status::Corruption(zlib_corrupt_msg); } *contents = @@ -345,9 +345,9 @@ Status UncompressBlockContents(const char* data, size_t n, case kBZip2Compression: ubuf = std::unique_ptr<char[]>( port::BZip2_Uncompress(data, n, &decompress_size)); - static char bzip2_corrupt_msg[] = - "Bzip2 not supported or corrupted Bzip2 compressed block contents"; if (!ubuf) { + static char bzip2_corrupt_msg[] = + "Bzip2 not supported or corrupted Bzip2 compressed block contents"; return Status::Corruption(bzip2_corrupt_msg); } *contents = @@ -356,9 +356,9 @@ Status UncompressBlockContents(const char* data, size_t n, case kLZ4Compression: ubuf = std::unique_ptr<char[]>( port::LZ4_Uncompress(data, n, &decompress_size)); - static char lz4_corrupt_msg[] = - "LZ4 not supported or corrupted LZ4 compressed block contents"; if (!ubuf) { + static char lz4_corrupt_msg[] = + "LZ4 not supported or corrupted LZ4 compressed block contents";
return Status::Corruption(lz4_corrupt_msg); } *contents = @@ -367,9 +367,9 @@ Status UncompressBlockContents(const char* data, size_t n, case kLZ4HCCompression: ubuf = std::unique_ptr<char[]>( port::LZ4_Uncompress(data, n, &decompress_size)); - static char lz4hc_corrupt_msg[] = - "LZ4HC not supported or corrupted LZ4HC compressed block contents"; if (!ubuf) { + static char lz4hc_corrupt_msg[] = + "LZ4HC not supported or corrupted LZ4HC compressed block contents"; return Status::Corruption(lz4hc_corrupt_msg); } *contents = diff --git a/util/cache_test.cc b/util/cache_test.cc index c12cdb7e1..74109ff0c 100644 --- a/util/cache_test.cc +++ b/util/cache_test.cc @@ -386,7 +386,7 @@ class Value { namespace { void deleter(const Slice& key, void* value) { - delete (Value *)value; + delete static_cast<Value*>(value); } } // namespace diff --git a/utilities/backupable/backupable_db_test.cc b/utilities/backupable/backupable_db_test.cc index a585d1a9c..281837773 100644 --- a/utilities/backupable/backupable_db_test.cc +++ b/utilities/backupable/backupable_db_test.cc @@ -228,7 +228,7 @@ class FileManager : public EnvWrapper { public: explicit FileManager(Env* t) : EnvWrapper(t), rnd_(5) {} - Status DeleteRandomFileInDir(const std::string dir) { + Status DeleteRandomFileInDir(const std::string& dir) { std::vector<std::string> children; GetChildren(dir, &children); if (children.size() <= 2) { // . and .. 
diff --git a/utilities/document/document_db.cc b/utilities/document/document_db.cc index 901e91163..b19618533 100644 --- a/utilities/document/document_db.cc +++ b/utilities/document/document_db.cc @@ -407,7 +407,6 @@ class SimpleSortedIndex : public Index { assert(interval != nullptr); // because index is useful Direction direction; - std::string op; const JSONDocument* limit; if (interval->lower_bound != nullptr) { limit = interval->lower_bound; diff --git a/utilities/document/document_db_test.cc b/utilities/document/document_db_test.cc index d4c632cce..5b36a2060 100644 --- a/utilities/document/document_db_test.cc +++ b/utilities/document/document_db_test.cc @@ -56,7 +56,7 @@ class DocumentDBTest { } } - JSONDocument* Parse(const std::string doc) { + JSONDocument* Parse(const std::string& doc) { return JSONDocument::ParseJSON(ConvertQuotes(doc).c_str()); } diff --git a/utilities/ttl/ttl_test.cc b/utilities/ttl/ttl_test.cc index 66cabe8e3..d1c1235c3 100644 --- a/utilities/ttl/ttl_test.cc +++ b/utilities/ttl/ttl_test.cc @@ -263,7 +263,7 @@ class TtlTest { class TestFilter : public CompactionFilter { public: - TestFilter(const int64_t kSampleSize, const std::string kNewValue) + TestFilter(const int64_t kSampleSize, const std::string& kNewValue) : kSampleSize_(kSampleSize), kNewValue_(kNewValue) { } @@ -311,7 +311,7 @@ class TtlTest { class TestFilterFactory : public CompactionFilterFactory { public: - TestFilterFactory(const int64_t kSampleSize, const std::string kNewValue) + TestFilterFactory(const int64_t kSampleSize, const std::string& kNewValue) : kSampleSize_(kSampleSize), kNewValue_(kNewValue) { }