Use std::numeric_limits<> (#9954)

Summary:
Right now we still don't fully use std::numeric_limits<>, but instead rely on hand-rolled replacement constants such as port::kMaxUint64, kept mainly to support VS 2013. Since we now only support VS 2017 and up, that workaround is no longer needed. A code comment claims that MinGW still needs these constants, but we have no CI running MinGW, so that is hard to validate; and since we now require C++17, it is hard to imagine a MinGW toolchain that could still build RocksDB yet not support std::numeric_limits<>.
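
The whole patch follows one mechanical pattern: drop the project-local constant and use the standard, constexpr equivalent. A minimal before/after sketch (illustrative, not code lifted from this diff; kNoTimeOut mirrors the DBImpl constant touched below):

    #include <cstdint>
    #include <limits>

    // Before: hand-rolled constants defined in port/port_posix.h and
    // port/win/port_win.h, e.g.:
    //   const uint64_t kMaxUint64 = std::numeric_limits<uint64_t>::max();
    // After: use the standard facility directly. It has been constexpr
    // since C++11, so it also works in constant expressions:
    static const uint64_t kNoTimeOut = std::numeric_limits<uint64_t>::max();
    static_assert(kNoTimeOut == UINT64_MAX, "same value as the old constant");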

Pull Request resolved: https://github.com/facebook/rocksdb/pull/9954

Test Plan: See CI Runs.

Reviewed By: riversand963

Differential Revision: D36173954

fbshipit-source-id: a35a73af17cdcae20e258cdef57fcf29a50b49e0
Branch: main
Author: sdong (committed by Facebook GitHub Bot)
Parent: 46f8889b6a
Commit: 49628c9a83
57 changed files (changed-line counts in parentheses):

  1. db/column_family.cc (7)
  2. db/compaction/compaction.cc (4)
  3. db/compaction/compaction_job.cc (5)
  4. db/compaction/compaction_picker.cc (4)
  5. db/compaction/compaction_picker_level.cc (2)
  6. db/compaction/compaction_picker_test.cc (4)
  7. db/compaction/compaction_picker_universal.cc (2)
  8. db/db_compaction_test.cc (3)
  9. db/db_filesnapshot.cc (2)
  10. db/db_flush_test.cc (2)
  11. db/db_impl/db_impl.cc (2)
  12. db/db_impl/db_impl.h (2)
  13. db/db_impl/db_impl_compaction_flush.cc (9)
  14. db/db_impl/db_impl_debug.cc (9)
  15. db/db_impl/db_impl_files.cc (4)
  16. db/db_impl/db_impl_secondary.cc (7)
  17. db/db_kv_checksum_test.cc (4)
  18. db/db_memtable_test.cc (2)
  19. db/db_range_del_test.cc (3)
  20. db/db_wal_test.cc (2)
  21. db/dbformat.h (3)
  22. db/external_sst_file_test.cc (2)
  23. db/file_indexer.h (5)
  24. db/flush_job_test.cc (46)
  25. db/memtable.cc (4)
  26. db/memtable_list_test.cc (38)
  27. db/version_builder.cc (2)
  28. db/version_set.cc (2)
  29. db/version_set.h (6)
  30. db/wal_edit.h (3)
  31. db/write_batch.cc (12)
  32. db_stress_tool/db_stress_test_base.cc (9)
  33. file/file_prefetch_buffer.h (2)
  34. monitoring/histogram.cc (3)
  35. monitoring/persistent_stats_history.cc (4)
  36. options/cf_options.cc (7)
  37. options/options_test.cc (5)
  38. port/port_posix.h (10)
  39. port/win/port_win.h (26)
  40. table/block_based/block.cc (2)
  41. table/block_based/block_based_table_factory.cc (2)
  42. table/cuckoo/cuckoo_table_builder.h (2)
  43. table/meta_blocks.cc (4)
  44. table/table_properties.cc (2)
  45. tools/block_cache_analyzer/block_cache_trace_analyzer.cc (20)
  46. tools/block_cache_analyzer/block_cache_trace_analyzer_test.cc (6)
  47. tools/db_bench_tool.cc (3)
  48. tools/sst_dump_tool.cc (4)
  49. tools/trace_analyzer_tool.cc (2)
  50. trace_replay/block_cache_tracer.h (2)
  51. util/heap.h (8)
  52. util/rate_limiter.cc (17)
  53. util/rate_limiter_test.cc (2)
  54. util/string_util.cc (3)
  55. utilities/backup/backup_engine.cc (10)
  56. utilities/backup/backup_engine_test.cc (3)
  57. utilities/write_batch_with_index/write_batch_with_index_internal.h (4)

db/column_family.cc
@@ -501,7 +501,8 @@ std::vector<std::string> ColumnFamilyData::GetDbPaths() const {
   return paths;
 }
 
-const uint32_t ColumnFamilyData::kDummyColumnFamilyDataId = port::kMaxUint32;
+const uint32_t ColumnFamilyData::kDummyColumnFamilyDataId =
+    std::numeric_limits<uint32_t>::max();
 
 ColumnFamilyData::ColumnFamilyData(
     uint32_t id, const std::string& name, Version* _dummy_versions,
@@ -826,8 +827,8 @@ int GetL0ThresholdSpeedupCompaction(int level0_file_num_compaction_trigger,
   // condition.
   // Or twice as compaction trigger, if it is smaller.
   int64_t res = std::min(twice_level0_trigger, one_fourth_trigger_slowdown);
-  if (res >= port::kMaxInt32) {
-    return port::kMaxInt32;
+  if (res >= std::numeric_limits<int32_t>::max()) {
+    return std::numeric_limits<int32_t>::max();
   } else {
     // res fits in int
     return static_cast<int>(res);

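Aside: the GetL0ThresholdSpeedupCompaction hunk above uses a saturating clamp when narrowing a 64-bit intermediate to int. The same idiom restated in isolation (a sketch; the function name is illustrative, not from this patch):

    #include <cstdint>
    #include <limits>

    // Saturate at INT32_MAX instead of letting the narrowing cast overflow.
    // Assumes a non-negative input, as the caller above guarantees.
    int ClampToInt32(int64_t res) {
      if (res >= std::numeric_limits<int32_t>::max()) {
        return std::numeric_limits<int32_t>::max();
      }
      return static_cast<int>(res);  // res fits in int
    }
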
db/compaction/compaction.cc
@@ -518,7 +518,7 @@ uint64_t Compaction::OutputFilePreallocationSize() const {
     }
   }
 
-  if (max_output_file_size_ != port::kMaxUint64 &&
+  if (max_output_file_size_ != std::numeric_limits<uint64_t>::max() &&
       (immutable_options_.compaction_style == kCompactionStyleLevel ||
        output_level() > 0)) {
     preallocation_size = std::min(max_output_file_size_, preallocation_size);
@@ -616,7 +616,7 @@ bool Compaction::DoesInputReferenceBlobFiles() const {
 
 uint64_t Compaction::MinInputFileOldestAncesterTime(
     const InternalKey* start, const InternalKey* end) const {
-  uint64_t min_oldest_ancester_time = port::kMaxUint64;
+  uint64_t min_oldest_ancester_time = std::numeric_limits<uint64_t>::max();
   const InternalKeyComparator& icmp =
       column_family_data()->internal_comparator();
   for (const auto& level_files : inputs_) {

db/compaction/compaction_job.cc
@@ -1974,7 +1974,8 @@ Status CompactionJob::FinishCompactionOutputFile(
       refined_oldest_ancester_time =
           sub_compact->compaction->MinInputFileOldestAncesterTime(
               &(meta->smallest), &(meta->largest));
-      if (refined_oldest_ancester_time != port::kMaxUint64) {
+      if (refined_oldest_ancester_time !=
+          std::numeric_limits<uint64_t>::max()) {
         meta->oldest_ancester_time = refined_oldest_ancester_time;
       }
     }
@@ -2264,7 +2265,7 @@ Status CompactionJob::OpenCompactionOutputFile(
        sub_compact->compaction->MinInputFileOldestAncesterTime(
            (sub_compact->start != nullptr) ? &tmp_start : nullptr,
            (sub_compact->end != nullptr) ? &tmp_end : nullptr);
-    if (oldest_ancester_time == port::kMaxUint64) {
+    if (oldest_ancester_time == std::numeric_limits<uint64_t>::max()) {
      oldest_ancester_time = current_time;
    }

db/compaction/compaction_picker.cc
@@ -65,7 +65,7 @@ bool FindIntraL0Compaction(const std::vector<FileMetaData*>& level_files,
   size_t compact_bytes = static_cast<size_t>(level_files[start]->fd.file_size);
   uint64_t compensated_compact_bytes =
       level_files[start]->compensated_file_size;
-  size_t compact_bytes_per_del_file = port::kMaxSizet;
+  size_t compact_bytes_per_del_file = std::numeric_limits<size_t>::max();
   // Compaction range will be [start, limit).
   size_t limit;
   // Pull in files until the amount of compaction work per deleted file begins
@@ -717,7 +717,7 @@ Compaction* CompactionPicker::CompactRange(
   // files that are created during the current compaction.
   if (compact_range_options.bottommost_level_compaction ==
           BottommostLevelCompaction::kForceOptimized &&
-      max_file_num_to_ignore != port::kMaxUint64) {
+      max_file_num_to_ignore != std::numeric_limits<uint64_t>::max()) {
     assert(input_level == output_level);
     // inputs_shrunk holds a continuous subset of input files which were all
     // created before the current manual compaction

db/compaction/compaction_picker_level.cc
@@ -504,7 +504,7 @@ bool LevelCompactionBuilder::PickIntraL0Compaction() {
     return false;
   }
   return FindIntraL0Compaction(level_files, kMinFilesForIntraL0Compaction,
-                               port::kMaxUint64,
+                               std::numeric_limits<uint64_t>::max(),
                                mutable_cf_options_.max_compaction_bytes,
                                &start_level_inputs_, earliest_mem_seqno_);
 }

db/compaction/compaction_picker_test.cc
@@ -2653,8 +2653,8 @@ TEST_F(CompactionPickerTest, UniversalMarkedManualCompaction) {
       universal_compaction_picker.CompactRange(
           cf_name_, mutable_cf_options_, mutable_db_options_, vstorage_.get(),
           ColumnFamilyData::kCompactAllLevels, 6, CompactRangeOptions(),
-          nullptr, nullptr, &manual_end, &manual_conflict, port::kMaxUint64,
-          ""));
+          nullptr, nullptr, &manual_end, &manual_conflict,
+          std::numeric_limits<uint64_t>::max(), ""));
 
   ASSERT_TRUE(compaction);

db/compaction/compaction_picker_universal.cc
@@ -1371,7 +1371,7 @@ Compaction* UniversalCompactionBuilder::PickPeriodicCompaction() {
 
 uint64_t UniversalCompactionBuilder::GetMaxOverlappingBytes() const {
   if (!mutable_cf_options_.compaction_options_universal.incremental) {
-    return port::kMaxUint64;
+    return std::numeric_limits<uint64_t>::max();
   } else {
     // Try to align cutting boundary with files at the next level if the
     // file isn't end up with 1/2 of target size, or it would overlap

db/db_compaction_test.cc
@@ -4404,7 +4404,8 @@ TEST_F(DBCompactionTest, LevelPeriodicCompactionWithCompactionFilters) {
   for (CompactionFilterType comp_filter_type :
        {kUseCompactionFilter, kUseCompactionFilterFactory}) {
     // Assert that periodic compactions are not enabled.
-    ASSERT_EQ(port::kMaxUint64 - 1, options.periodic_compaction_seconds);
+    ASSERT_EQ(std::numeric_limits<uint64_t>::max() - 1,
+              options.periodic_compaction_seconds);
 
     if (comp_filter_type == kUseCompactionFilter) {
       options.compaction_filter = &test_compaction_filter;

db/db_filesnapshot.cc
@@ -177,7 +177,7 @@ Status DBImpl::GetLiveFilesStorageInfo(
   VectorLogPtr live_wal_files;
   bool flush_memtable = true;
   if (!immutable_db_options_.allow_2pc) {
-    if (opts.wal_size_for_flush == port::kMaxUint64) {
+    if (opts.wal_size_for_flush == std::numeric_limits<uint64_t>::max()) {
       flush_memtable = false;
     } else if (opts.wal_size_for_flush > 0) {
       // If the outstanding log files are small, we skip the flush.

db/db_flush_test.cc
@@ -2356,7 +2356,7 @@ TEST_P(DBAtomicFlushTest, PrecomputeMinLogNumberToKeepNon2PC) {
   ASSERT_OK(Flush(cf_ids));
   uint64_t log_num_after_flush = dbfull()->TEST_GetCurrentLogNumber();
 
-  uint64_t min_log_number_to_keep = port::kMaxUint64;
+  uint64_t min_log_number_to_keep = std::numeric_limits<uint64_t>::max();
   autovector<ColumnFamilyData*> flushed_cfds;
   autovector<autovector<VersionEdit*>> flush_edits;
   for (size_t i = 0; i != num_cfs; ++i) {

db/db_impl/db_impl.cc
@@ -5338,7 +5338,7 @@ Status DBImpl::ReserveFileNumbersBeforeIngestion(
 
 Status DBImpl::GetCreationTimeOfOldestFile(uint64_t* creation_time) {
   if (mutable_db_options_.max_open_files == -1) {
-    uint64_t oldest_time = port::kMaxUint64;
+    uint64_t oldest_time = std::numeric_limits<uint64_t>::max();
     for (auto cfd : *versions_->GetColumnFamilySet()) {
       if (!cfd->IsDropped()) {
         uint64_t ctime;

db/db_impl/db_impl.h
@@ -2299,7 +2299,7 @@ class DBImpl : public DB {
   static const int KEEP_LOG_FILE_NUM = 1000;
-  // MSVC version 1800 still does not have constexpr for ::max()
-  static const uint64_t kNoTimeOut = port::kMaxUint64;
+  static const uint64_t kNoTimeOut = std::numeric_limits<uint64_t>::max();
 
   std::string db_absolute_path_;

db/db_impl/db_impl_compaction_flush.cc
@@ -188,7 +188,7 @@ Status DBImpl::FlushMemTableToOutputFile(
   // a memtable without knowing such snapshot(s).
   uint64_t max_memtable_id = needs_to_sync_closed_wals
                                  ? cfd->imm()->GetLatestMemTableID()
-                                 : port::kMaxUint64;
+                                 : std::numeric_limits<uint64_t>::max();
 
   // If needs_to_sync_closed_wals is false, then the flush job will pick ALL
   // existing memtables of the column family when PickMemTable() is called
@@ -1041,7 +1041,8 @@ Status DBImpl::CompactRangeInternal(const CompactRangeOptions& options,
     }
     s = RunManualCompaction(cfd, ColumnFamilyData::kCompactAllLevels,
                             final_output_level, options, begin, end, exclusive,
-                            false, port::kMaxUint64, trim_ts);
+                            false, std::numeric_limits<uint64_t>::max(),
+                            trim_ts);
   } else {
     int first_overlapped_level = kInvalidLevel;
     int max_overlapped_level = kInvalidLevel;
@@ -1078,7 +1079,7 @@ Status DBImpl::CompactRangeInternal(const CompactRangeOptions& options,
     if (s.ok() && first_overlapped_level != kInvalidLevel) {
       // max_file_num_to_ignore can be used to filter out newly created SST
       // files, useful for bottom level compaction in a manual compaction
-      uint64_t max_file_num_to_ignore = port::kMaxUint64;
+      uint64_t max_file_num_to_ignore = std::numeric_limits<uint64_t>::max();
       uint64_t next_file_number = versions_->current_next_file_number();
       final_output_level = max_overlapped_level;
       int output_level;
@@ -2015,7 +2016,7 @@ Status DBImpl::FlushMemTable(ColumnFamilyData* cfd,
     // be created and scheduled, status::OK() will be returned.
     s = SwitchMemtable(cfd, &context);
   }
-  const uint64_t flush_memtable_id = port::kMaxUint64;
+  const uint64_t flush_memtable_id = std::numeric_limits<uint64_t>::max();
   if (s.ok()) {
     if (cfd->imm()->NumNotFlushed() != 0 || !cfd->mem()->IsEmpty() ||
         !cached_recoverable_state_empty_.load()) {

db/db_impl/db_impl_debug.cc
@@ -118,10 +118,11 @@ Status DBImpl::TEST_CompactRange(int level, const Slice* begin,
           cfd->ioptions()->compaction_style == kCompactionStyleFIFO)
              ? level
              : level + 1;
-  return RunManualCompaction(cfd, level, output_level, CompactRangeOptions(),
-                             begin, end, true, disallow_trivial_move,
-                             port::kMaxUint64 /*max_file_num_to_ignore*/,
-                             "" /*trim_ts*/);
+  return RunManualCompaction(
+      cfd, level, output_level, CompactRangeOptions(), begin, end, true,
+      disallow_trivial_move,
+      std::numeric_limits<uint64_t>::max() /*max_file_num_to_ignore*/,
+      "" /*trim_ts*/);
 }
 
 Status DBImpl::TEST_SwitchMemtable(ColumnFamilyData* cfd) {

db/db_impl/db_impl_files.cc
@@ -761,7 +761,7 @@ uint64_t PrecomputeMinLogNumberToKeepNon2PC(
   assert(!cfds_to_flush.empty());
   assert(cfds_to_flush.size() == edit_lists.size());
 
-  uint64_t min_log_number_to_keep = port::kMaxUint64;
+  uint64_t min_log_number_to_keep = std::numeric_limits<uint64_t>::max();
   for (const auto& edit_list : edit_lists) {
     uint64_t log = 0;
     for (const auto& e : edit_list) {
@@ -773,7 +773,7 @@ uint64_t PrecomputeMinLogNumberToKeepNon2PC(
       min_log_number_to_keep = std::min(min_log_number_to_keep, log);
     }
   }
-  if (min_log_number_to_keep == port::kMaxUint64) {
+  if (min_log_number_to_keep == std::numeric_limits<uint64_t>::max()) {
     min_log_number_to_keep = cfds_to_flush[0]->GetLogNumber();
     for (size_t i = 1; i < cfds_to_flush.size(); i++) {
       min_log_number_to_keep =

db/db_impl/db_impl_secondary.cc
@@ -247,15 +247,16 @@ Status DBImplSecondary::RecoverLogFiles(
         if (seq_of_batch <= seq) {
           continue;
         }
-        auto curr_log_num = port::kMaxUint64;
+        auto curr_log_num = std::numeric_limits<uint64_t>::max();
         if (cfd_to_current_log_.count(cfd) > 0) {
           curr_log_num = cfd_to_current_log_[cfd];
         }
         // If the active memtable contains records added by replaying an
         // earlier WAL, then we need to seal the memtable, add it to the
         // immutable memtable list and create a new active memtable.
-        if (!cfd->mem()->IsEmpty() && (curr_log_num == port::kMaxUint64 ||
-                                       curr_log_num != log_number)) {
+        if (!cfd->mem()->IsEmpty() &&
+            (curr_log_num == std::numeric_limits<uint64_t>::max() ||
+             curr_log_num != log_number)) {
           const MutableCFOptions mutable_cf_options =
               *cfd->GetLatestMutableCFOptions();
           MemTable* new_mem =

db/db_kv_checksum_test.cc
@@ -79,7 +79,7 @@ class DbKvChecksumTest
 
   void CorruptNextByteCallBack(void* arg) {
     Slice encoded = *static_cast<Slice*>(arg);
-    if (entry_len_ == port::kMaxSizet) {
+    if (entry_len_ == std::numeric_limits<size_t>::max()) {
       // We learn the entry size on the first attempt
       entry_len_ = encoded.size();
     }
@@ -96,7 +96,7 @@ class DbKvChecksumTest
   WriteBatchOpType op_type_;
   char corrupt_byte_addend_;
   size_t corrupt_byte_offset_ = 0;
-  size_t entry_len_ = port::kMaxSizet;
+  size_t entry_len_ = std::numeric_limits<size_t>::max();
 };
 
 std::string GetTestNameSuffix(

db/db_memtable_test.cc
@@ -97,7 +97,7 @@ class MockMemTableRepFactory : public MemTableRepFactory {
 
  private:
   MockMemTableRep* mock_rep_;
-  // workaround since there's no port::kMaxUint32 yet.
+  // workaround since there's no std::numeric_limits<uint32_t>::max() yet.
   uint32_t last_column_family_id_ = static_cast<uint32_t>(-1);
 };

db/db_range_del_test.cc
@@ -500,7 +500,8 @@ TEST_F(DBRangeDelTest, ValidUniversalSubcompactionBoundaries) {
       1 /* input_level */, 2 /* output_level */, CompactRangeOptions(),
       nullptr /* begin */, nullptr /* end */, true /* exclusive */,
       true /* disallow_trivial_move */,
-      port::kMaxUint64 /* max_file_num_to_ignore */, "" /*trim_ts*/));
+      std::numeric_limits<uint64_t>::max() /* max_file_num_to_ignore */,
+      "" /*trim_ts*/));
 }
 
 #endif  // ROCKSDB_LITE

db/db_wal_test.cc
@@ -1009,7 +1009,7 @@ TEST_F(DBWALTest, RecoveryWithLogDataForSomeCFs) {
       if (log_files.size() > 0) {
         earliest_log_nums[i] = log_files[0]->LogNumber();
       } else {
-        earliest_log_nums[i] = port::kMaxUint64;
+        earliest_log_nums[i] = std::numeric_limits<uint64_t>::max();
       }
     }
     // Check at least the first WAL was cleaned up during the recovery.

db/dbformat.h
@@ -90,7 +90,8 @@ inline bool IsExtendedValueType(ValueType t) {
 // can be packed together into 64-bits.
 static const SequenceNumber kMaxSequenceNumber = ((0x1ull << 56) - 1);
 
-static const SequenceNumber kDisableGlobalSequenceNumber = port::kMaxUint64;
+static const SequenceNumber kDisableGlobalSequenceNumber =
+    std::numeric_limits<uint64_t>::max();
 
 constexpr uint64_t kNumInternalBytes = 8;

db/external_sst_file_test.cc
@@ -2405,7 +2405,7 @@ TEST_P(ExternalSSTBlockChecksumTest, DISABLED_HugeBlockChecksum) {
   SstFileWriter sst_file_writer(EnvOptions(), options);
 
   // 2^32 - 1, will lead to data block with more than 2^32 bytes
-  size_t huge_size = port::kMaxUint32;
+  size_t huge_size = std::numeric_limits<uint32_t>::max();
 
   std::string f = sst_files_dir_ + "f.sst";
   ASSERT_OK(sst_file_writer.Open(f));

db/file_indexer.h
@@ -58,10 +58,7 @@ class FileIndexer {
   void UpdateIndex(Arena* arena, const size_t num_levels,
                    std::vector<FileMetaData*>* const files);
 
-  enum {
-    // MSVC version 1800 still does not have constexpr for ::max()
-    kLevelMaxIndex = ROCKSDB_NAMESPACE::port::kMaxInt32
-  };
+  enum { kLevelMaxIndex = std::numeric_limits<int32_t>::max() };
 
  private:
   size_t num_levels_;

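Aside: the file_indexer.h hunk above is the reason the MSVC 2013 workaround existed at all. std::numeric_limits<T>::max() has been constexpr since C++11, so it is valid in contexts that require a constant expression, such as enumerator values. A sketch under that assumption (the type name is illustrative, not from this patch):

    #include <cstdint>
    #include <limits>

    struct Indexer {
      // Legal because ::max() is a constant expression in C++11 and later.
      enum { kLevelMaxIndex = std::numeric_limits<int32_t>::max() };
    };
    static_assert(Indexer::kLevelMaxIndex == 2147483647,
                  "usable at compile time");
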
db/flush_job_test.cc
@@ -164,12 +164,12 @@ TEST_F(FlushJobTest, Empty) {
   SnapshotChecker* snapshot_checker = nullptr;  // not relavant
   FlushJob flush_job(
       dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
-      *cfd->GetLatestMutableCFOptions(), port::kMaxUint64 /* memtable_id */,
-      env_options_, versions_.get(), &mutex_, &shutting_down_, {},
-      kMaxSequenceNumber, snapshot_checker, &job_context, nullptr, nullptr,
-      nullptr, kNoCompression, nullptr, &event_logger, false,
-      true /* sync_output_directory */, true /* write_manifest */,
-      Env::Priority::USER, nullptr /*IOTracer*/);
+      *cfd->GetLatestMutableCFOptions(),
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, {}, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      nullptr, &event_logger, false, true /* sync_output_directory */,
+      true /* write_manifest */, Env::Priority::USER, nullptr /*IOTracer*/);
   {
     InstrumentedMutexLock l(&mutex_);
     flush_job.PickMemTable();
@@ -248,11 +248,12 @@ TEST_F(FlushJobTest, NonEmpty) {
   SnapshotChecker* snapshot_checker = nullptr;  // not relavant
   FlushJob flush_job(
       dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
-      *cfd->GetLatestMutableCFOptions(), port::kMaxUint64 /* memtable_id */,
-      env_options_, versions_.get(), &mutex_, &shutting_down_, {},
-      kMaxSequenceNumber, snapshot_checker, &job_context, nullptr, nullptr,
-      nullptr, kNoCompression, db_options_.statistics.get(), &event_logger,
-      true, true /* sync_output_directory */, true /* write_manifest */,
+      *cfd->GetLatestMutableCFOptions(),
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, {}, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      db_options_.statistics.get(), &event_logger, true,
+      true /* sync_output_directory */, true /* write_manifest */,
       Env::Priority::USER, nullptr /*IOTracer*/);
 
   HistogramData hist;
@@ -509,11 +510,12 @@ TEST_F(FlushJobTest, Snapshots) {
   SnapshotChecker* snapshot_checker = nullptr;  // not relavant
   FlushJob flush_job(
       dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
-      *cfd->GetLatestMutableCFOptions(), port::kMaxUint64 /* memtable_id */,
-      env_options_, versions_.get(), &mutex_, &shutting_down_, snapshots,
-      kMaxSequenceNumber, snapshot_checker, &job_context, nullptr, nullptr,
-      nullptr, kNoCompression, db_options_.statistics.get(), &event_logger,
-      true, true /* sync_output_directory */, true /* write_manifest */,
+      *cfd->GetLatestMutableCFOptions(),
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      db_options_.statistics.get(), &event_logger, true,
+      true /* sync_output_directory */, true /* write_manifest */,
       Env::Priority::USER, nullptr /*IOTracer*/);
   mutex_.Lock();
   flush_job.PickMemTable();
@@ -577,9 +579,9 @@ TEST_F(FlushJobTimestampTest, AllKeysExpired) {
   PutFixed64(&full_history_ts_low, std::numeric_limits<uint64_t>::max());
   FlushJob flush_job(
       dbname_, cfd, db_options_, *cfd->GetLatestMutableCFOptions(),
-      port::kMaxUint64 /* memtable_id */, env_options_, versions_.get(),
-      &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber, snapshot_checker,
-      &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
       db_options_.statistics.get(), &event_logger, true,
       true /* sync_output_directory */, true /* write_manifest */,
       Env::Priority::USER, nullptr /*IOTracer*/, /*db_id=*/"",
@@ -628,9 +630,9 @@ TEST_F(FlushJobTimestampTest, NoKeyExpired) {
   PutFixed64(&full_history_ts_low, 0);
   FlushJob flush_job(
       dbname_, cfd, db_options_, *cfd->GetLatestMutableCFOptions(),
-      port::kMaxUint64 /* memtable_id */, env_options_, versions_.get(),
-      &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber, snapshot_checker,
-      &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
       db_options_.statistics.get(), &event_logger, true,
       true /* sync_output_directory */, true /* write_manifest */,
       Env::Priority::USER, nullptr /*IOTracer*/, /*db_id=*/"",

db/memtable.cc
@@ -140,8 +140,8 @@ size_t MemTable::ApproximateMemoryUsage() {
   for (size_t usage : usages) {
     // If usage + total_usage >= kMaxSizet, return kMaxSizet.
     // the following variation is to avoid numeric overflow.
-    if (usage >= port::kMaxSizet - total_usage) {
-      return port::kMaxSizet;
+    if (usage >= std::numeric_limits<size_t>::max() - total_usage) {
+      return std::numeric_limits<size_t>::max();
     }
     total_usage += usage;
   }

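Aside: the memtable.cc hunk above relies on an overflow-safe comparison, testing `usage >= max - total_usage` instead of `usage + total_usage >= max`, so the check itself cannot wrap around. The same idiom in isolation (a sketch; the helper name is illustrative):

    #include <cstddef>
    #include <limits>

    // Add two sizes, saturating at SIZE_MAX rather than wrapping.
    size_t SaturatingAdd(size_t total, size_t usage) {
      if (usage >= std::numeric_limits<size_t>::max() - total) {
        return std::numeric_limits<size_t>::max();  // would overflow: saturate
      }
      return total + usage;
    }
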
db/memtable_list_test.cc
@@ -209,7 +209,8 @@ TEST_F(MemTableListTest, Empty) {
   ASSERT_FALSE(list.IsFlushPending());
 
   autovector<MemTable*> mems;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &mems);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &mems);
   ASSERT_EQ(0, mems.size());
 
   autovector<MemTable*> to_delete;
@@ -418,7 +419,8 @@ TEST_F(MemTableListTest, GetFromHistoryTest) {
   // Flush this memtable from the list.
   // (It will then be a part of the memtable history).
   autovector<MemTable*> to_flush;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(1, to_flush.size());
 
   MutableCFOptions mutable_cf_options(options);
@@ -472,7 +474,8 @@ TEST_F(MemTableListTest, GetFromHistoryTest) {
   ASSERT_EQ(0, to_delete.size());
 
   to_flush.clear();
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(1, to_flush.size());
 
   // Flush second memtable
@@ -593,7 +596,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_FALSE(list.IsFlushPending());
   ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
   autovector<MemTable*> to_flush;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(0, to_flush.size());
 
   // Request a flush even though there is nothing to flush
@@ -602,7 +606,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
 
   // Attempt to 'flush' to clear request for flush
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(0, to_flush.size());
   ASSERT_FALSE(list.IsFlushPending());
   ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
@@ -626,7 +631,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
 
   // Pick tables to flush
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(2, to_flush.size());
   ASSERT_EQ(2, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -647,7 +653,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_EQ(0, to_delete.size());
 
   // Pick tables to flush
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(3, to_flush.size());
   ASSERT_EQ(3, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -655,7 +662,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
 
   // Pick tables to flush again
   autovector<MemTable*> to_flush2;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush2);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush2);
   ASSERT_EQ(0, to_flush2.size());
   ASSERT_EQ(3, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -673,7 +681,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
 
   // Pick tables to flush again
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush2);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush2);
   ASSERT_EQ(1, to_flush2.size());
   ASSERT_EQ(4, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -694,7 +703,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_EQ(0, to_delete.size());
 
   // Pick tables to flush
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   // Should pick 4 of 5 since 1 table has been picked in to_flush2
   ASSERT_EQ(4, to_flush.size());
   ASSERT_EQ(5, list.NumNotFlushed());
@@ -703,7 +713,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
 
   // Pick tables to flush again
   autovector<MemTable*> to_flush3;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush3);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush3);
   ASSERT_EQ(0, to_flush3.size());  // nothing not in progress of being flushed
   ASSERT_EQ(5, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -872,8 +883,9 @@ TEST_F(MemTableListTest, AtomicFlusTest) {
     auto* list = lists[i];
     ASSERT_FALSE(list->IsFlushPending());
    ASSERT_FALSE(list->imm_flush_needed.load(std::memory_order_acquire));
-    list->PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */,
-                               &flush_candidates[i]);
+    list->PickMemtablesToFlush(
+        std::numeric_limits<uint64_t>::max() /* memtable_id */,
+        &flush_candidates[i]);
     ASSERT_EQ(0, flush_candidates[i].size());
   }
   // Request flush even though there is nothing to flush

db/version_builder.cc
@@ -1144,7 +1144,7 @@ class VersionBuilder::Rep {
     size_t table_cache_capacity = table_cache_->get_cache()->GetCapacity();
     bool always_load = (table_cache_capacity == TableCache::kInfiniteCapacity);
-    size_t max_load = port::kMaxSizet;
+    size_t max_load = std::numeric_limits<size_t>::max();
 
     if (!always_load) {
       // If it is initial loading and not set to always loading all the

db/version_set.cc
@@ -1517,7 +1517,7 @@ uint64_t Version::GetSstFilesSize() {
 }
 
 void Version::GetCreationTimeOfOldestFile(uint64_t* creation_time) {
-  uint64_t oldest_time = port::kMaxUint64;
+  uint64_t oldest_time = std::numeric_limits<uint64_t>::max();
   for (int level = 0; level < storage_info_.num_non_empty_levels_; level++) {
     for (FileMetaData* meta : storage_info_.LevelFiles(level)) {
      assert(meta->fd.table_reader != nullptr);

db/version_set.h
@@ -1213,7 +1213,7 @@ class VersionSet {
   // new_log_number_for_empty_cf.
   uint64_t PreComputeMinLogNumberWithUnflushedData(
       uint64_t new_log_number_for_empty_cf) const {
-    uint64_t min_log_num = port::kMaxUint64;
+    uint64_t min_log_num = std::numeric_limits<uint64_t>::max();
     for (auto cfd : *column_family_set_) {
       // It's safe to ignore dropped column families here:
       // cfd->IsDropped() becomes true after the drop is persisted in MANIFEST.
@@ -1229,7 +1229,7 @@ class VersionSet {
   // file, except data from `cfd_to_skip`.
   uint64_t PreComputeMinLogNumberWithUnflushedData(
       const ColumnFamilyData* cfd_to_skip) const {
-    uint64_t min_log_num = port::kMaxUint64;
+    uint64_t min_log_num = std::numeric_limits<uint64_t>::max();
     for (auto cfd : *column_family_set_) {
       if (cfd == cfd_to_skip) {
         continue;
@@ -1246,7 +1246,7 @@ class VersionSet {
   // file, except data from `cfds_to_skip`.
   uint64_t PreComputeMinLogNumberWithUnflushedData(
       const std::unordered_set<const ColumnFamilyData*>& cfds_to_skip) const {
-    uint64_t min_log_num = port::kMaxUint64;
+    uint64_t min_log_num = std::numeric_limits<uint64_t>::max();
     for (auto cfd : *column_family_set_) {
       if (cfds_to_skip.count(cfd)) {
         continue;

db/wal_edit.h
@@ -44,7 +44,8 @@ class WalMetadata {
  private:
   // The size of WAL is unknown, used when the WAL is not synced yet or is
   // empty.
-  constexpr static uint64_t kUnknownWalSize = port::kMaxUint64;
+  constexpr static uint64_t kUnknownWalSize =
+      std::numeric_limits<uint64_t>::max();
 
   // Size of the most recently synced WAL in bytes.
   uint64_t synced_size_bytes_ = kUnknownWalSize;

db/write_batch.cc
@@ -745,10 +745,10 @@ Status CheckColumnFamilyTimestampSize(ColumnFamilyHandle* column_family,
 
 Status WriteBatchInternal::Put(WriteBatch* b, uint32_t column_family_id,
                                const Slice& key, const Slice& value) {
-  if (key.size() > size_t{port::kMaxUint32}) {
+  if (key.size() > size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("key is too large");
   }
-  if (value.size() > size_t{port::kMaxUint32}) {
+  if (value.size() > size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("value is too large");
   }
@@ -825,7 +825,7 @@ Status WriteBatchInternal::CheckSlicePartsLength(const SliceParts& key,
   for (int i = 0; i < key.num_parts; ++i) {
     total_key_bytes += key.parts[i].size();
   }
-  if (total_key_bytes >= size_t{port::kMaxUint32}) {
+  if (total_key_bytes >= size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("key is too large");
   }
@@ -833,7 +833,7 @@ Status WriteBatchInternal::CheckSlicePartsLength(const SliceParts& key,
   for (int i = 0; i < value.num_parts; ++i) {
     total_value_bytes += value.parts[i].size();
   }
-  if (total_value_bytes >= size_t{port::kMaxUint32}) {
+  if (total_value_bytes >= size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("value is too large");
   }
   return Status::OK();
@@ -1292,10 +1292,10 @@ Status WriteBatch::DeleteRange(ColumnFamilyHandle* column_family,
 
 Status WriteBatchInternal::Merge(WriteBatch* b, uint32_t column_family_id,
                                  const Slice& key, const Slice& value) {
-  if (key.size() > size_t{port::kMaxUint32}) {
+  if (key.size() > size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("key is too large");
   }
-  if (value.size() > size_t{port::kMaxUint32}) {
+  if (value.size() > size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("value is too large");
   }

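Aside: the checks above reject keys and values whose sizes do not fit in uint32_t, presumably because the write batch encoding length-prefixes each slice with a 32-bit length. A simplified stand-in for the guard (a sketch; the helper name is illustrative and not part of this patch):

    #include <cstdint>
    #include <limits>
    #include <string>

    // True if the payload length is representable in a 32-bit length field.
    // The size_t{...} brace-init keeps the comparison within one unsigned
    // type instead of mixing uint32_t and size_t implicitly.
    bool FitsInLengthPrefix(const std::string& payload) {
      return payload.size() <= size_t{std::numeric_limits<uint32_t>::max()};
    }
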
db_stress_tool/db_stress_test_base.cc
@@ -2029,11 +2029,11 @@ void StressTest::TestAcquireSnapshot(ThreadState* thread,
     if (FLAGS_long_running_snapshots) {
       // Hold 10% of snapshots for 10x more
       if (thread->rand.OneIn(10)) {
-        assert(hold_for < port::kMaxInt64 / 10);
+        assert(hold_for < std::numeric_limits<uint64_t>::max() / 10);
         hold_for *= 10;
         // Hold 1% of snapshots for 100x more
         if (thread->rand.OneIn(10)) {
-          assert(hold_for < port::kMaxInt64 / 10);
+          assert(hold_for < std::numeric_limits<uint64_t>::max() / 10);
           hold_for *= 10;
         }
       }
@@ -2065,8 +2065,9 @@ void StressTest::TestCompactRange(ThreadState* thread, int64_t rand_key,
                                   const Slice& start_key,
                                   ColumnFamilyHandle* column_family) {
   int64_t end_key_num;
-  if (port::kMaxInt64 - rand_key < FLAGS_compact_range_width) {
-    end_key_num = port::kMaxInt64;
+  if (std::numeric_limits<int64_t>::max() - rand_key <
+      FLAGS_compact_range_width) {
+    end_key_num = std::numeric_limits<int64_t>::max();
   } else {
     end_key_num = FLAGS_compact_range_width + rand_key;
   }

file/file_prefetch_buffer.h
@@ -71,7 +71,7 @@ class FilePrefetchBuffer {
         readahead_size_(readahead_size),
         initial_auto_readahead_size_(readahead_size),
         max_readahead_size_(max_readahead_size),
-        min_offset_read_(port::kMaxSizet),
+        min_offset_read_(std::numeric_limits<size_t>::max()),
         enable_(enable),
         track_min_offset_(track_min_offset),
         implicit_auto_readahead_(implicit_auto_readahead),

monitoring/histogram.cc
@@ -26,7 +26,8 @@ HistogramBucketMapper::HistogramBucketMapper() {
   // size of array buckets_ in HistogramImpl
   bucketValues_ = {1, 2};
   double bucket_val = static_cast<double>(bucketValues_.back());
-  while ((bucket_val = 1.5 * bucket_val) <= static_cast<double>(port::kMaxUint64)) {
+  while ((bucket_val = 1.5 * bucket_val) <=
+         static_cast<double>(std::numeric_limits<uint64_t>::max())) {
     bucketValues_.push_back(static_cast<uint64_t>(bucket_val));
     // Extracts two most significant digits to make histogram buckets more
     // human-readable. E.g., 172 becomes 170.

monitoring/persistent_stats_history.cc
@@ -98,13 +98,13 @@ std::pair<uint64_t, std::string> parseKey(const Slice& key,
   std::string::size_type pos = key_str.find("#");
   // TODO(Zhongyi): add counters to track parse failures?
   if (pos == std::string::npos) {
-    result.first = port::kMaxUint64;
+    result.first = std::numeric_limits<uint64_t>::max();
     result.second.clear();
   } else {
     uint64_t parsed_time = ParseUint64(key_str.substr(0, pos));
     // skip entries with timestamp smaller than start_time
     if (parsed_time < start_time) {
-      result.first = port::kMaxUint64;
+      result.first = std::numeric_limits<uint64_t>::max();
       result.second = "";
     } else {
       result.first = parsed_time;

options/cf_options.cc
@@ -886,7 +886,7 @@ uint64_t MultiplyCheckOverflow(uint64_t op1, double op2) {
   if (op1 == 0 || op2 <= 0) {
     return 0;
   }
-  if (port::kMaxUint64 / op1 < op2) {
+  if (std::numeric_limits<uint64_t>::max() / op1 < op2) {
     return op1;
   }
   return static_cast<uint64_t>(op1 * op2);
@@ -915,8 +915,9 @@ size_t MaxFileSizeForL0MetaPin(const MutableCFOptions& cf_options) {
   // or a former larger `write_buffer_size` value to avoid surprising users with
   // pinned memory usage. We use a factor of 1.5 to account for overhead
   // introduced during flush in most cases.
-  if (port::kMaxSizet / 3 < cf_options.write_buffer_size / 2) {
-    return port::kMaxSizet;
+  if (std::numeric_limits<size_t>::max() / 3 <
+      cf_options.write_buffer_size / 2) {
+    return std::numeric_limits<size_t>::max();
   }
   return cf_options.write_buffer_size / 2 * 3;
 }

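Aside: MultiplyCheckOverflow above guards a uint64 * double product by checking the quotient first; if op1 * op2 would exceed the uint64 range it returns op1 unchanged. The same logic in isolation (a sketch; the name is illustrative, and as in the original it assumes the caller has already handled op1 == 0 and op2 <= 0):

    #include <cstdint>
    #include <limits>

    uint64_t MultiplyOrKeep(uint64_t op1, double op2) {
      // If max / op1 < op2, then op1 * op2 > max, so bail out with op1.
      if (std::numeric_limits<uint64_t>::max() / op1 < op2) {
        return op1;
      }
      // op1 is promoted to double for the multiply, then truncated back.
      return static_cast<uint64_t>(op1 * op2);
    }
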
options/options_test.cc
@@ -4082,9 +4082,10 @@ TEST_F(OptionsParserTest, IntegerParsing) {
   ASSERT_EQ(ParseUint32("4294967295"), 4294967295U);
   ASSERT_EQ(ParseSizeT("18446744073709551615"), 18446744073709551615U);
   ASSERT_EQ(ParseInt64("9223372036854775807"), 9223372036854775807);
-  ASSERT_EQ(ParseInt64("-9223372036854775808"), port::kMinInt64);
+  ASSERT_EQ(ParseInt64("-9223372036854775808"),
+            std::numeric_limits<int64_t>::min());
   ASSERT_EQ(ParseInt32("2147483647"), 2147483647);
-  ASSERT_EQ(ParseInt32("-2147483648"), port::kMinInt32);
+  ASSERT_EQ(ParseInt32("-2147483648"), std::numeric_limits<int32_t>::min());
   ASSERT_EQ(ParseInt("-32767"), -32767);
   ASSERT_EQ(ParseDouble("-1.234567"), -1.234567);
 }

port/port_posix.h
@@ -95,16 +95,6 @@ namespace ROCKSDB_NAMESPACE {
 extern const bool kDefaultToAdaptiveMutex;
 
 namespace port {
-
-// For use at db/file_indexer.h kLevelMaxIndex
-const uint32_t kMaxUint32 = std::numeric_limits<uint32_t>::max();
-const int kMaxInt32 = std::numeric_limits<int32_t>::max();
-const int kMinInt32 = std::numeric_limits<int32_t>::min();
-const uint64_t kMaxUint64 = std::numeric_limits<uint64_t>::max();
-const int64_t kMaxInt64 = std::numeric_limits<int64_t>::max();
-const int64_t kMinInt64 = std::numeric_limits<int64_t>::min();
-const size_t kMaxSizet = std::numeric_limits<size_t>::max();
-
 constexpr bool kLittleEndian = PLATFORM_IS_LITTLE_ENDIAN;
 #undef PLATFORM_IS_LITTLE_ENDIAN

port/win/port_win.h
@@ -82,37 +82,11 @@ namespace port {
 #define snprintf _snprintf
 #define ROCKSDB_NOEXCEPT
-
-// std::numeric_limits<size_t>::max() is not constexpr just yet
-// therefore, use the same limits
-// For use at db/file_indexer.h kLevelMaxIndex
-const uint32_t kMaxUint32 = UINT32_MAX;
-const int kMaxInt32 = INT32_MAX;
-const int kMinInt32 = INT32_MIN;
-const int64_t kMaxInt64 = INT64_MAX;
-const int64_t kMinInt64 = INT64_MIN;
-const uint64_t kMaxUint64 = UINT64_MAX;
-
-#ifdef _WIN64
-const size_t kMaxSizet = UINT64_MAX;
-#else
-const size_t kMaxSizet = UINT_MAX;
-#endif
-
 #else  // VS >= 2015 or MinGW
 #define ROCKSDB_NOEXCEPT noexcept
-
-// For use at db/file_indexer.h kLevelMaxIndex
-const uint32_t kMaxUint32 = std::numeric_limits<uint32_t>::max();
-const int kMaxInt32 = std::numeric_limits<int>::max();
-const int kMinInt32 = std::numeric_limits<int>::min();
-const uint64_t kMaxUint64 = std::numeric_limits<uint64_t>::max();
-const int64_t kMaxInt64 = std::numeric_limits<int64_t>::max();
-const int64_t kMinInt64 = std::numeric_limits<int64_t>::min();
-const size_t kMaxSizet = std::numeric_limits<size_t>::max();
-
 #endif  //_MSC_VER
 
 // "Windows is designed to run on little-endian computer architectures."

table/block_based/block.cc
@@ -721,7 +721,7 @@ void BlockIter<TValue>::FindKeyAfterBinarySeek(const Slice& target,
   } else {
     // We are in the last restart interval. The while-loop will terminate by
     // `Valid()` returning false upon advancing past the block's last key.
-    max_offset = port::kMaxUint32;
+    max_offset = std::numeric_limits<uint32_t>::max();
   }
   while (true) {
     NextImpl();

table/block_based/block_based_table_factory.cc
@@ -658,7 +658,7 @@ Status BlockBasedTableFactory::ValidateOptions(
     return Status::InvalidArgument(
         "Block alignment requested but block size is not a power of 2");
   }
-  if (table_options_.block_size > port::kMaxUint32) {
+  if (table_options_.block_size > std::numeric_limits<uint32_t>::max()) {
     return Status::InvalidArgument(
         "block size exceeds maximum number (4GiB) allowed");
   }

table/cuckoo/cuckoo_table_builder.h
@@ -85,7 +85,7 @@ class CuckooTableBuilder: public TableBuilder {
     // We assume number of items is <= 2^32.
     uint32_t make_space_for_key_call_id;
   };
-  static const uint32_t kMaxVectorIdx = port::kMaxInt32;
+  static const uint32_t kMaxVectorIdx = std::numeric_limits<int32_t>::max();
 
   bool MakeSpaceForKey(const autovector<uint64_t>& hash_vals,
                        const uint32_t call_id,

table/meta_blocks.cc
@@ -53,8 +53,8 @@ Slice MetaIndexBuilder::Finish() {
 // object, so there's no need for restart points. Thus we set the restart
 // interval to infinity to save space.
 PropertyBlockBuilder::PropertyBlockBuilder()
-    : properties_block_(
-          new BlockBuilder(port::kMaxInt32 /* restart interval */)) {}
+    : properties_block_(new BlockBuilder(
+          std::numeric_limits<int32_t>::max() /* restart interval */)) {}
 
 void PropertyBlockBuilder::Add(const std::string& name,
                                const std::string& val) {

table/table_properties.cc
@@ -17,7 +17,7 @@
 namespace ROCKSDB_NAMESPACE {
 
 const uint32_t TablePropertiesCollectorFactory::Context::kUnknownColumnFamily =
-    port::kMaxInt32;
+    std::numeric_limits<int32_t>::max();
 
 namespace {
 
 void AppendProperty(

@@ -412,7 +412,7 @@ void BlockCacheTraceAnalyzer::WriteMissRatioTimeline(uint64_t time_unit) const {
   }
   std::map<uint64_t, std::map<std::string, std::map<uint64_t, double>>>
       cs_name_timeline;
-  uint64_t start_time = port::kMaxUint64;
+  uint64_t start_time = std::numeric_limits<uint64_t>::max();
   uint64_t end_time = 0;
   const std::map<uint64_t, uint64_t>& trace_num_misses =
       adjust_time_unit(miss_ratio_stats_.num_misses_timeline(), time_unit);
@@ -427,7 +427,8 @@ void BlockCacheTraceAnalyzer::WriteMissRatioTimeline(uint64_t time_unit) const {
     auto it = trace_num_accesses.find(time);
     assert(it != trace_num_accesses.end());
     uint64_t access = it->second;
-    cs_name_timeline[port::kMaxUint64]["trace"][time] = percent(miss, access);
+    cs_name_timeline[std::numeric_limits<uint64_t>::max()]["trace"][time] =
+        percent(miss, access);
   }
   for (auto const& config_caches : cache_simulator_->sim_caches()) {
     const CacheConfiguration& config = config_caches.first;
@@ -492,7 +493,7 @@ void BlockCacheTraceAnalyzer::WriteMissTimeline(uint64_t time_unit) const {
   }
   std::map<uint64_t, std::map<std::string, std::map<uint64_t, uint64_t>>>
       cs_name_timeline;
-  uint64_t start_time = port::kMaxUint64;
+  uint64_t start_time = std::numeric_limits<uint64_t>::max();
   uint64_t end_time = 0;
   const std::map<uint64_t, uint64_t>& trace_num_misses =
       adjust_time_unit(miss_ratio_stats_.num_misses_timeline(), time_unit);
@@ -501,7 +502,8 @@ void BlockCacheTraceAnalyzer::WriteMissTimeline(uint64_t time_unit) const {
     start_time = std::min(start_time, time);
     end_time = std::max(end_time, time);
     uint64_t miss = num_miss.second;
-    cs_name_timeline[port::kMaxUint64]["trace"][time] = miss;
+    cs_name_timeline[std::numeric_limits<uint64_t>::max()]["trace"][time] =
+        miss;
   }
   for (auto const& config_caches : cache_simulator_->sim_caches()) {
     const CacheConfiguration& config = config_caches.first;
@@ -589,7 +591,7 @@ void BlockCacheTraceAnalyzer::WriteSkewness(
   for (auto const& percent : percent_buckets) {
     label_bucket_naccesses[label_str][percent] = 0;
     size_t end_index = 0;
-    if (percent == port::kMaxUint64) {
+    if (percent == std::numeric_limits<uint64_t>::max()) {
       end_index = label_naccesses.size();
     } else {
       end_index = percent * label_naccesses.size() / 100;
@@ -856,7 +858,7 @@ void BlockCacheTraceAnalyzer::WriteAccessTimeline(const std::string& label_str,
                                                   uint64_t time_unit,
                                                   bool user_access_only) const {
   std::set<std::string> labels = ParseLabelStr(label_str);
-  uint64_t start_time = port::kMaxUint64;
+  uint64_t start_time = std::numeric_limits<uint64_t>::max();
   uint64_t end_time = 0;
   std::map<std::string, std::map<uint64_t, uint64_t>> label_access_timeline;
   std::map<uint64_t, std::vector<std::string>> access_count_block_id_map;
@@ -1091,7 +1093,7 @@ void BlockCacheTraceAnalyzer::WriteReuseInterval(
                            kMicrosInSecond) /
                           block.num_accesses;
     } else {
-      avg_reuse_interval = port::kMaxUint64 - 1;
+      avg_reuse_interval = std::numeric_limits<uint64_t>::max() - 1;
     }
     if (labels.find(kGroupbyCaller) != labels.end()) {
       for (auto const& timeline : block.caller_num_accesses_timeline) {
@@ -1152,7 +1154,7 @@ void BlockCacheTraceAnalyzer::WriteReuseLifetime(
       lifetime =
           (block.last_access_time - block.first_access_time) / kMicrosInSecond;
     } else {
-      lifetime = port::kMaxUint64 - 1;
+      lifetime = std::numeric_limits<uint64_t>::max() - 1;
     }
     const std::string label = BuildLabel(
         labels, cf_name, fd, level, type,
@@ -2103,7 +2105,7 @@ std::vector<uint64_t> parse_buckets(const std::string& bucket_str) {
     getline(ss, bucket, ',');
     buckets.push_back(ParseUint64(bucket));
   }
-  buckets.push_back(port::kMaxUint64);
+  buckets.push_back(std::numeric_limits<uint64_t>::max());
   return buckets;
 }
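Aside: the trailing std::numeric_limits<uint64_t>::max() pushed above acts as a catch-all upper boundary, so every value falls into some bucket. A minimal sketch of that invariant (BucketIndex is a hypothetical helper, not part of the analyzer; it assumes the boundaries are sorted ascending):

```cpp
#include <cstdint>
#include <limits>
#include <vector>

// Returns the index of the first bucket whose boundary is >= value. The loop
// always terminates because the last boundary is uint64_t's maximum, which no
// value can exceed.
size_t BucketIndex(const std::vector<uint64_t>& buckets, uint64_t value) {
  size_t i = 0;
  while (value > buckets[i]) {
    ++i;
  }
  return i;
}
```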

@@ -277,7 +277,7 @@ TEST_F(BlockCacheTracerTest, BlockCacheAnalyzer) {
     ASSERT_OK(env_->DeleteFile(mrc_path));
     const std::vector<std::string> time_units{"1", "60", "3600"};
-    expected_capacities.push_back(port::kMaxUint64);
+    expected_capacities.push_back(std::numeric_limits<uint64_t>::max());
     for (auto const& expected_capacity : expected_capacities) {
       for (auto const& time_unit : time_units) {
         const std::string miss_ratio_timeline_path =
@@ -293,7 +293,7 @@ TEST_F(BlockCacheTracerTest, BlockCacheAnalyzer) {
         std::string substr;
         getline(ss, substr, ',');
         if (!read_header) {
-          if (expected_capacity == port::kMaxUint64) {
+          if (expected_capacity == std::numeric_limits<uint64_t>::max()) {
            ASSERT_EQ("trace", substr);
          } else {
            ASSERT_EQ("lru-1-0", substr);
@@ -321,7 +321,7 @@ TEST_F(BlockCacheTracerTest, BlockCacheAnalyzer) {
        std::string substr;
        getline(ss, substr, ',');
        if (num_misses == 0) {
-          if (expected_capacity == port::kMaxUint64) {
+          if (expected_capacity == std::numeric_limits<uint64_t>::max()) {
            ASSERT_EQ("trace", substr);
          } else {
            ASSERT_EQ("lru-1-0", substr);

@@ -8073,7 +8073,8 @@ class Benchmark {
     }
     std::unique_ptr<StatsHistoryIterator> shi;
-    Status s = db->GetStatsHistory(0, port::kMaxUint64, &shi);
+    Status s =
+        db->GetStatsHistory(0, std::numeric_limits<uint64_t>::max(), &shi);
     if (!s.ok()) {
       fprintf(stdout, "%s\n", s.ToString().c_str());
       return;
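For context, passing 0 and std::numeric_limits<uint64_t>::max() as the time range asks GetStatsHistory() for the entire persisted stats history. A minimal usage sketch (assumes `db` is an open DB with stats persistence enabled, e.g. options.stats_persist_period_sec > 0):

```cpp
#include <cinttypes>
#include <cstdio>
#include <limits>
#include <memory>

#include "rocksdb/db.h"
#include "rocksdb/stats_history.h"

void DumpAllStatsHistory(rocksdb::DB* db) {
  std::unique_ptr<rocksdb::StatsHistoryIterator> it;
  rocksdb::Status s =
      db->GetStatsHistory(0, std::numeric_limits<uint64_t>::max(), &it);
  if (!s.ok()) {
    fprintf(stderr, "%s\n", s.ToString().c_str());
    return;
  }
  // Each iterator position is one snapshot: a timestamp plus a stats map.
  for (; it->Valid(); it->Next()) {
    for (const auto& stat : it->GetStatsMap()) {
      fprintf(stdout, "%" PRIu64 " %s = %" PRIu64 "\n", it->GetStatsTime(),
              stat.first.c_str(), stat.second);
    }
  }
}
```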

@@ -282,7 +282,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
     } else if (ParseIntArg(argv[i], "--compression_max_dict_bytes=",
                            "compression_max_dict_bytes must be numeric",
                            &tmp_val)) {
-      if (tmp_val < 0 || tmp_val > port::kMaxUint32) {
+      if (tmp_val < 0 || tmp_val > std::numeric_limits<uint32_t>::max()) {
        fprintf(stderr, "compression_max_dict_bytes must be a uint32_t: '%s'\n",
                argv[i]);
        print_help(/*to_stderr*/ true);
@@ -292,7 +292,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
    } else if (ParseIntArg(argv[i], "--compression_zstd_max_train_bytes=",
                           "compression_zstd_max_train_bytes must be numeric",
                           &tmp_val)) {
-      if (tmp_val < 0 || tmp_val > port::kMaxUint32) {
+      if (tmp_val < 0 || tmp_val > std::numeric_limits<uint32_t>::max()) {
        fprintf(stderr,
                "compression_zstd_max_train_bytes must be a uint32_t: '%s'\n",
                argv[i]);

@@ -190,7 +190,7 @@ uint64_t MultiplyCheckOverflow(uint64_t op1, uint64_t op2) {
   if (op1 == 0 || op2 == 0) {
     return 0;
   }
-  if (port::kMaxUint64 / op1 < op2) {
+  if (std::numeric_limits<uint64_t>::max() / op1 < op2) {
     return op1;
   }
   return (op1 * op2);
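The guard above is the standard division-based overflow check: for nonzero op1, op1 * op2 exceeds uint64_t's range exactly when op2 > max / op1, and the division itself can never wrap. A minimal sketch of the same idea as a saturating variant (MultiplySaturating is a hypothetical name; the function in this diff instead returns op1 on overflow):

```cpp
#include <cstdint>
#include <limits>

uint64_t MultiplySaturating(uint64_t op1, uint64_t op2) {
  if (op1 == 0 || op2 == 0) {
    return 0;
  }
  // op2 > max / op1 implies op1 * op2 > max, so the product would wrap.
  if (std::numeric_limits<uint64_t>::max() / op1 < op2) {
    return std::numeric_limits<uint64_t>::max();
  }
  return op1 * op2;
}

// Example: op1 = op2 = 1ull << 40. max / op1 == (1 << 24) - 1 < op2, so the
// guard fires and we never compute the wrapped product.
```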

@@ -281,7 +281,7 @@ class BlockCacheTracer {
                          const Slice& block_key, const Slice& cf_name,
                          const Slice& referenced_key);
-  // GetId cycles from 1 to port::kMaxUint64.
+  // GetId cycles from 1 to std::numeric_limits<uint64_t>::max().
   uint64_t NextGetId();
  private:
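The updated comment documents that get ids cycle through [1, max] with 0 reserved as "no id". A hypothetical sketch of a counter with that contract (the real BlockCacheTracer implementation may differ in detail):

```cpp
#include <atomic>
#include <cstdint>

class GetIdGenerator {
 public:
  uint64_t NextGetId() {
    uint64_t id;
    do {
      // fetch_add returns the previous value; +1 yields the new id.
      id = counter_.fetch_add(1, std::memory_order_relaxed) + 1;
    } while (id == 0);  // skip 0 when the counter wraps past the maximum
    return id;
  }

 private:
  std::atomic<uint64_t> counter_{0};
};
```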

@@ -101,7 +101,9 @@ class BinaryHeap {
   size_t size() const { return data_.size(); }
-  void reset_root_cmp_cache() { root_cmp_cache_ = port::kMaxSizet; }
+  void reset_root_cmp_cache() {
+    root_cmp_cache_ = std::numeric_limits<size_t>::max();
+  }
  private:
   static inline size_t get_root() { return 0; }
@@ -126,7 +128,7 @@ class BinaryHeap {
   void downheap(size_t index) {
     T v = std::move(data_[index]);
-    size_t picked_child = port::kMaxSizet;
+    size_t picked_child = std::numeric_limits<size_t>::max();
     while (1) {
       const size_t left_child = get_left(index);
       if (get_left(index) >= data_.size()) {
@@ -165,7 +167,7 @@ class BinaryHeap {
   Compare cmp_;
   autovector<T> data_;
   // Used to reduce number of cmp_ calls in downheap()
-  size_t root_cmp_cache_ = port::kMaxSizet;
+  size_t root_cmp_cache_ = std::numeric_limits<size_t>::max();
 };
 }  // namespace ROCKSDB_NAMESPACE
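Both picked_child and root_cmp_cache_ use the largest size_t as a "no valid index" sentinel; it can never collide with a real element position. A minimal sketch of the same pattern outside the heap (FindFirstEven and kNoIndex are hypothetical names):

```cpp
#include <cstddef>
#include <limits>
#include <vector>

constexpr size_t kNoIndex = std::numeric_limits<size_t>::max();

// Returns the index of the first even element, or kNoIndex if none exists.
size_t FindFirstEven(const std::vector<int>& v) {
  for (size_t i = 0; i < v.size(); ++i) {
    if (v[i] % 2 == 0) {
      return i;
    }
  }
  return kNoIndex;
}
```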

@@ -347,10 +347,11 @@ void GenericRateLimiter::RefillBytesAndGrantRequests() {
 int64_t GenericRateLimiter::CalculateRefillBytesPerPeriod(
     int64_t rate_bytes_per_sec) {
-  if (port::kMaxInt64 / rate_bytes_per_sec < options_.refill_period_us) {
+  if (std::numeric_limits<int64_t>::max() / rate_bytes_per_sec <
+      options_.refill_period_us) {
     // Avoid unexpected result in the overflow case. The result now is still
     // inaccurate but is a number that is large enough.
-    return port::kMaxInt64 / 1000000;
+    return std::numeric_limits<int64_t>::max() / 1000000;
   } else {
     return rate_bytes_per_sec * options_.refill_period_us / 1000000;
   }
@@ -374,7 +375,7 @@ Status GenericRateLimiter::Tune() {
       std::chrono::microseconds(options_.refill_period_us);
   // We tune every kRefillsPerTune intervals, so the overflow and division-by-
   // zero conditions should never happen.
-  assert(num_drains_ <= port::kMaxInt64 / 100);
+  assert(num_drains_ <= std::numeric_limits<int64_t>::max() / 100);
   assert(elapsed_intervals > 0);
   int64_t drained_pct = num_drains_ * 100 / elapsed_intervals;
@@ -385,14 +386,15 @@ Status GenericRateLimiter::Tune() {
   } else if (drained_pct < kLowWatermarkPct) {
     // sanitize to prevent overflow
     int64_t sanitized_prev_bytes_per_sec =
-        std::min(prev_bytes_per_sec, port::kMaxInt64 / 100);
+        std::min(prev_bytes_per_sec, std::numeric_limits<int64_t>::max() / 100);
     new_bytes_per_sec =
         std::max(options_.max_bytes_per_sec / kAllowedRangeFactor,
                  sanitized_prev_bytes_per_sec * 100 / (100 + kAdjustFactorPct));
   } else if (drained_pct > kHighWatermarkPct) {
     // sanitize to prevent overflow
-    int64_t sanitized_prev_bytes_per_sec = std::min(
-        prev_bytes_per_sec, port::kMaxInt64 / (100 + kAdjustFactorPct));
+    int64_t sanitized_prev_bytes_per_sec =
+        std::min(prev_bytes_per_sec, std::numeric_limits<int64_t>::max() /
+                                         (100 + kAdjustFactorPct));
     new_bytes_per_sec =
         std::min(options_.max_bytes_per_sec,
                  sanitized_prev_bytes_per_sec * (100 + kAdjustFactorPct) / 100);
@@ -433,7 +435,8 @@ static int RegisterBuiltinRateLimiters(ObjectLibrary& library,
           GenericRateLimiter::kClassName(),
           [](const std::string& /*uri*/, std::unique_ptr<RateLimiter>* guard,
              std::string* /*errmsg*/) {
-            guard->reset(new GenericRateLimiter(port::kMaxInt64));
+            guard->reset(
+                new GenericRateLimiter(std::numeric_limits<int64_t>::max()));
             return guard->get();
           });
   size_t num_types;
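The Tune() branches above clamp prev_bytes_per_sec to max / factor before multiplying by that factor, which makes the later multiplication provably safe. A minimal sketch of the clamp-before-multiply pattern (ScaleWithoutOverflow is a hypothetical name; assumes k > 0):

```cpp
#include <algorithm>
#include <cstdint>
#include <limits>

int64_t ScaleWithoutOverflow(int64_t x, int64_t k) {
  // After the clamp, sanitized <= max / k, so sanitized * k <= max.
  int64_t sanitized = std::min(x, std::numeric_limits<int64_t>::max() / k);
  return sanitized * k;
}

// e.g. ScaleWithoutOverflow(prev_bytes_per_sec, 100 + kAdjustFactorPct)
// mirrors the high-watermark branch.
```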

@@ -36,7 +36,7 @@ class RateLimiterTest : public testing::Test {
 };
 TEST_F(RateLimiterTest, OverflowRate) {
-  GenericRateLimiter limiter(port::kMaxInt64, 1000, 10,
+  GenericRateLimiter limiter(std::numeric_limits<int64_t>::max(), 1000, 10,
                              RateLimiter::Mode::kWritesOnly,
                              SystemClock::Default(), false /* auto_tuned */);
   ASSERT_GT(limiter.GetSingleBurstBytes(), 1000000000ll);

@@ -315,7 +315,8 @@ uint32_t ParseUint32(const std::string& value) {
 int32_t ParseInt32(const std::string& value) {
   int64_t num = ParseInt64(value);
-  if (num <= port::kMaxInt32 && num >= port::kMinInt32) {
+  if (num <= std::numeric_limits<int32_t>::max() &&
+      num >= std::numeric_limits<int32_t>::min()) {
     return static_cast<int32_t>(num);
   } else {
     throw std::out_of_range(value);
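ParseInt32() range-checks against the int32_t limits before the narrowing cast, so the cast is always value-preserving. A generalized sketch of the same check (CheckedNarrow is a hypothetical helper; shown for signed-to-signed narrowing such as int64_t to int32_t, where the comparisons are well behaved):

```cpp
#include <cstdint>
#include <limits>
#include <stdexcept>

template <typename Narrow, typename Wide>
Narrow CheckedNarrow(Wide value) {
  if (value > static_cast<Wide>(std::numeric_limits<Narrow>::max()) ||
      value < static_cast<Wide>(std::numeric_limits<Narrow>::min())) {
    throw std::out_of_range("value does not fit in the target type");
  }
  return static_cast<Narrow>(value);  // safe: value is within Narrow's range
}

// e.g. int32_t v = CheckedNarrow<int32_t, int64_t>(num);
```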

@@ -1012,8 +1012,9 @@ IOStatus BackupEngineImpl::Initialize() {
     // we might need to clean up from previous crash or I/O errors
     might_need_garbage_collect_ = true;
-    if (options_.max_valid_backups_to_open != port::kMaxInt32) {
-      options_.max_valid_backups_to_open = port::kMaxInt32;
+    if (options_.max_valid_backups_to_open !=
+        std::numeric_limits<int32_t>::max()) {
+      options_.max_valid_backups_to_open = std::numeric_limits<int32_t>::max();
       ROCKS_LOG_WARN(
           options_.info_log,
           "`max_valid_backups_to_open` is not set to the default value. Ignoring "
@@ -1434,7 +1435,8 @@ IOStatus BackupEngineImpl::CreateNewBackupWithMetadata(
             contents.size(), db_options.statistics.get(), 0 /* size_limit */,
             false /* shared_checksum */, options.progress_callback, contents);
       } /* create_file_cb */,
-      &sequence_number, options.flush_before_backup ? 0 : port::kMaxUint64,
+      &sequence_number,
+      options.flush_before_backup ? 0 : std::numeric_limits<uint64_t>::max(),
       compare_checksum));
   if (io_s.ok()) {
     new_backup->SetSequenceNumber(sequence_number);
@@ -2171,7 +2173,7 @@ IOStatus BackupEngineImpl::AddBackupFileWorkItem(
       return io_s;
     }
   }
-  if (size_bytes == port::kMaxUint64) {
+  if (size_bytes == std::numeric_limits<uint64_t>::max()) {
     return IOStatus::NotFound("File missing: " + src_path);
   }
   // dst_relative depends on the following conditions:

@@ -3756,7 +3756,8 @@ TEST_F(BackupEngineTest, WriteOnlyEngineNoSharedFileDeletion) {
     }
     CloseDBAndBackupEngine();
-    engine_options_->max_valid_backups_to_open = port::kMaxInt32;
+    engine_options_->max_valid_backups_to_open =
+        std::numeric_limits<int32_t>::max();
     AssertBackupConsistency(i + 1, 0, (i + 1) * kNumKeys);
   }
 }

@@ -95,7 +95,7 @@ struct WriteBatchIndexEntry {
                        bool is_forward_direction, bool is_seek_to_first)
       // For SeekForPrev(), we need to make the dummy entry larger than any
       // entry who has the same search key. Otherwise, we'll miss those entries.
-      : offset(is_forward_direction ? 0 : port::kMaxSizet),
+      : offset(is_forward_direction ? 0 : std::numeric_limits<size_t>::max()),
         column_family(_column_family),
         key_offset(0),
         key_size(is_seek_to_first ? kFlagMinInCf : 0),
@@ -105,7 +105,7 @@ struct WriteBatchIndexEntry {
   // If this flag appears in the key_size, it indicates a
   // key that is smaller than any other entry for the same column family.
-  static const size_t kFlagMinInCf = port::kMaxSizet;
+  static const size_t kFlagMinInCf = std::numeric_limits<size_t>::max();
   bool is_min_in_cf() const {
     assert(key_size != kFlagMinInCf ||