Turn on -Wshorten-64-to-32 and fix all the errors

Summary:
We need to turn on -Wshorten-64-to-32 for mobile. See D1671432 (internal Phabricator) for details.

This diff turns on the warning flag and fixes all the errors. There were also some interesting errors that I would call outright bugs, especially in plain table. Going forward, I think it makes sense to keep this flag turned on and be very careful when converting 64-bit values to 32-bit variables.
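
A minimal sketch of what the flag catches and the explicit-cast pattern most of the fixes below use (illustrative code, not from this diff; the warning text is approximate):

#include <vector>

int bad(const std::vector<int>& v) {
  return v.size();  // warning: implicit conversion loses integer precision:
                    // 'size_t' (aka 'unsigned long') to 'int'
                    // [-Wshorten-64-to-32]
}

int good(const std::vector<int>& v) {
  // Either keep the wider type (many fixes below switch counters from int
  // to size_t), or narrow explicitly once the value is known to fit:
  return static_cast<int>(v.size());
}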

Test Plan: compiles

Reviewers: ljin, rven, yhchiang, sdong

Reviewed By: yhchiang

Subscribers: bobbaldwin, dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D28689
Branch: main
Author: Igor Canadi (10 years ago)
Parent: 113796c493
Commit: 767777c2bd
Changed files (lines changed per file):

Makefile (1)
build_tools/build_detect_platform (8)
db/c.cc (12)
db/c_test.c (2)
db/column_family_test.cc (12)
db/compaction.cc (14)
db/compaction.h (12)
db/compaction_job.cc (15)
db/compaction_picker.cc (10)
db/compaction_picker_test.cc (10)
db/comparator_db_test.cc (8)
db/corruption_test.cc (15)
db/cuckoo_table_db_test.cc (2)
db/db_bench.cc (8)
db/db_impl.cc (5)
db/db_iter_test.cc (2)
db/db_test.cc (27)
db/dbformat.cc (3)
db/dbformat.h (16)
db/file_indexer.cc (32)
db/file_indexer.h (14)
db/file_indexer_test.cc (9)
db/flush_job.cc (6)
db/forward_iterator.cc (9)
db/listener_test.cc (6)
db/log_and_apply_bench.cc (9)
db/log_test.cc (6)
db/memtable.cc (20)
db/merge_test.cc (54)
db/plain_table_db_test.cc (2)
db/prefix_test.cc (6)
db/skiplist_test.cc (9)
db/version_edit.h (6)
db/version_edit_test.cc (3)
db/version_set.cc (50)
db/version_set.h (2)
db/write_batch.cc (2)
include/rocksdb/env.h (12)
java/rocksjni/iterator.cc (11)
java/rocksjni/restorejni.cc (4)
java/rocksjni/rocksjni.cc (25)
java/rocksjni/slice.cc (4)
java/rocksjni/write_batch.cc (5)
port/port_posix.h (86)
table/block.cc (3)
table/block.h (3)
table/block_based_filter_block.cc (9)
table/block_based_table_builder.cc (8)
table/block_builder.cc (10)
table/block_hash_index.cc (2)
table/block_hash_index_test.cc (4)
table/block_prefix_index.cc (2)
table/block_test.cc (2)
table/cuckoo_table_builder.cc (13)
table/cuckoo_table_builder.h (8)
table/cuckoo_table_builder_test.cc (39)
table/cuckoo_table_factory.h (2)
table/cuckoo_table_reader.cc (9)
table/cuckoo_table_reader_test.cc (4)
table/format.cc (2)
table/full_filter_block_test.cc (4)
table/merger_test.cc (11)
table/plain_table_builder.cc (16)
table/plain_table_builder.h (2)
table/plain_table_index.cc (19)
table/plain_table_index.h (14)
table/plain_table_key_coding.cc (19)
table/plain_table_reader.cc (11)
table/plain_table_reader.h (4)
table/table_test.cc (2)
tools/blob_store_bench.cc (2)
tools/db_stress.cc (30)
util/auto_roll_logger.cc (3)
util/auto_roll_logger_test.cc (2)
util/benchharness.cc (3)
util/benchharness_test.cc (14)
util/blob_store.cc (4)
util/bloom.cc (14)
util/bloom_test.cc (3)
util/cache.cc (2)
util/cache_test.cc (4)
util/coding.h (10)
util/crc32c.cc (6)
util/dynamic_bloom_test.cc (16)
util/env_posix.cc (6)
util/hash.cc (2)
util/hash_cuckoo_rep.cc (5)
util/hash_linklist_rep.cc (3)
util/hash_skiplist_rep.cc (3)
util/ldb_cmd.cc (5)
util/mock_env.cc (14)
util/murmurhash.h (2)
util/mutable_cf_options.cc (2)
util/mutable_cf_options.h (2)
util/options_builder.cc (8)
util/options_helper.cc (10)
util/rate_limiter.cc (3)
util/rate_limiter_test.cc (11)
util/status.cc (4)
utilities/backupable/backupable_db.cc (7)

Some files were not shown because too many files changed in this diff.

@ -147,7 +147,6 @@ TESTS = \
cuckoo_table_builder_test \
cuckoo_table_reader_test \
cuckoo_table_db_test \
write_batch_with_index_test \
flush_job_test \
wal_manager_test \
listener_test \

@ -284,6 +284,14 @@ EOF
fi
fi
# Test whether -Wshorten-64-to-32 is available
$CXX $CFLAGS -x c++ - -o /dev/null -Wshorten-64-to-32 2>/dev/null <<EOF
int main() {}
EOF
if [ "$?" = 0 ]; then
COMMON_FLAGS="$COMMON_FLAGS -Wshorten-64-to-32"
fi
# shall we use HDFS?
if test "$USE_HDFS"; then

@ -385,11 +385,9 @@ struct rocksdb_mergeoperator_t : public MergeOperator {
unsigned char success;
size_t new_value_len;
char* tmp_new_value = (*full_merge_)(
state_,
key.data(), key.size(),
existing_value_data, existing_value_len,
&operand_pointers[0], &operand_sizes[0], n,
&success, &new_value_len);
state_, key.data(), key.size(), existing_value_data, existing_value_len,
&operand_pointers[0], &operand_sizes[0], static_cast<int>(n), &success,
&new_value_len);
new_value->assign(tmp_new_value, new_value_len);
if (delete_value_ != nullptr) {
@ -417,7 +415,7 @@ struct rocksdb_mergeoperator_t : public MergeOperator {
size_t new_value_len;
char* tmp_new_value = (*partial_merge_)(
state_, key.data(), key.size(), &operand_pointers[0], &operand_sizes[0],
operand_count, &success, &new_value_len);
static_cast<int>(operand_count), &success, &new_value_len);
new_value->assign(tmp_new_value, new_value_len);
if (delete_value_ != nullptr) {
@ -2041,7 +2039,7 @@ void rocksdb_options_set_min_level_to_compress(rocksdb_options_t* opt, int level
int rocksdb_livefiles_count(
const rocksdb_livefiles_t* lf) {
return lf->rep.size();
return static_cast<int>(lf->rep.size());
}
const char* rocksdb_livefiles_name(

@ -132,7 +132,7 @@ static void CmpDestroy(void* arg) { }
static int CmpCompare(void* arg, const char* a, size_t alen,
const char* b, size_t blen) {
int n = (alen < blen) ? alen : blen;
size_t n = (alen < blen) ? alen : blen;
int r = memcmp(a, b, n);
if (r == 0) {
if (alen < blen) r = -1;

@ -133,7 +133,7 @@ class ColumnFamilyTest {
void CreateColumnFamilies(
const std::vector<std::string>& cfs,
const std::vector<ColumnFamilyOptions> options = {}) {
int cfi = handles_.size();
int cfi = static_cast<int>(handles_.size());
handles_.resize(cfi + cfs.size());
names_.resize(cfi + cfs.size());
for (size_t i = 0; i < cfs.size(); ++i) {
@ -231,7 +231,7 @@ class ColumnFamilyTest {
snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
result += buf;
if (f > 0) {
last_non_zero_offset = result.size();
last_non_zero_offset = static_cast<int>(result.size());
}
}
result.resize(last_non_zero_offset);
@ -287,8 +287,8 @@ class ColumnFamilyTest {
assert(num_per_cf.size() == handles_.size());
for (size_t i = 0; i < num_per_cf.size(); ++i) {
ASSERT_EQ(num_per_cf[i],
GetProperty(i, "rocksdb.num-immutable-mem-table"));
ASSERT_EQ(num_per_cf[i], GetProperty(static_cast<int>(i),
"rocksdb.num-immutable-mem-table"));
}
}
@ -916,11 +916,11 @@ TEST(ColumnFamilyTest, DontRollEmptyLogs) {
CreateColumnFamiliesAndReopen({"one", "two", "three", "four"});
for (size_t i = 0; i < handles_.size(); ++i) {
PutRandomData(i, 10, 100);
PutRandomData(static_cast<int>(i), 10, 100);
}
int num_writable_file_start = env_->GetNumberOfNewWritableFileCalls();
// this will trigger the flushes
for (size_t i = 0; i <= 4; ++i) {
for (int i = 0; i <= 4; ++i) {
ASSERT_OK(Flush(i));
}

@ -124,9 +124,9 @@ Compaction::~Compaction() {
void Compaction::GenerateFileLevels() {
input_levels_.resize(num_input_levels());
for (int which = 0; which < num_input_levels(); which++) {
DoGenerateLevelFilesBrief(
&input_levels_[which], inputs_[which].files, &arena_);
for (size_t which = 0; which < num_input_levels(); which++) {
DoGenerateLevelFilesBrief(&input_levels_[which], inputs_[which].files,
&arena_);
}
}
@ -144,7 +144,7 @@ bool Compaction::IsTrivialMove() const {
}
void Compaction::AddInputDeletions(VersionEdit* out_edit) {
for (int which = 0; which < num_input_levels(); which++) {
for (size_t which = 0; which < num_input_levels(); which++) {
for (size_t i = 0; i < inputs_[which].size(); i++) {
out_edit->DeleteFile(level(which), inputs_[which][i]->fd.GetNumber());
}
@ -207,7 +207,7 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) {
// Mark (or clear) each file that is being compacted
void Compaction::MarkFilesBeingCompacted(bool mark_as_compacted) {
for (int i = 0; i < num_input_levels(); i++) {
for (size_t i = 0; i < num_input_levels(); i++) {
for (unsigned int j = 0; j < inputs_[i].size(); j++) {
assert(mark_as_compacted ? !inputs_[i][j]->being_compacted :
inputs_[i][j]->being_compacted);
@ -293,7 +293,7 @@ void Compaction::Summary(char* output, int len) {
return;
}
for (int level_iter = 0; level_iter < num_input_levels(); ++level_iter) {
for (size_t level_iter = 0; level_iter < num_input_levels(); ++level_iter) {
if (level_iter > 0) {
write += snprintf(output + write, len - write, "], [");
if (write < 0 || write >= len) {
@ -317,7 +317,7 @@ uint64_t Compaction::OutputFilePreallocationSize(
if (cfd_->ioptions()->compaction_style == kCompactionStyleLevel) {
preallocation_size = mutable_options.MaxFileSizeForLevel(output_level());
} else {
for (int level_iter = 0; level_iter < num_input_levels(); ++level_iter) {
for (size_t level_iter = 0; level_iter < num_input_levels(); ++level_iter) {
for (const auto& f : inputs_[level_iter].files) {
preallocation_size += f->fd.GetFileSize();
}

@ -23,7 +23,7 @@ struct CompactionInputFiles {
inline bool empty() const { return files.empty(); }
inline size_t size() const { return files.size(); }
inline void clear() { files.clear(); }
inline FileMetaData* operator[](int i) const { return files[i]; }
inline FileMetaData* operator[](size_t i) const { return files[i]; }
};
class Version;
@ -48,7 +48,7 @@ class Compaction {
// Returns the level associated to the specified compaction input level.
// If compaction_input_level is not specified, then input_level is set to 0.
int level(int compaction_input_level = 0) const {
int level(size_t compaction_input_level = 0) const {
return inputs_[compaction_input_level].level;
}
@ -56,7 +56,7 @@ class Compaction {
int output_level() const { return output_level_; }
// Returns the number of input levels in this compaction.
int num_input_levels() const { return inputs_.size(); }
size_t num_input_levels() const { return inputs_.size(); }
// Return the object that holds the edits to the descriptor done
// by this compaction.
@ -66,7 +66,7 @@ class Compaction {
// compaction input level.
// The function will return 0 if when "compaction_input_level" < 0
// or "compaction_input_level" >= "num_input_levels()".
int num_input_files(size_t compaction_input_level) const {
size_t num_input_files(size_t compaction_input_level) const {
if (compaction_input_level < inputs_.size()) {
return inputs_[compaction_input_level].size();
}
@ -83,7 +83,7 @@ class Compaction {
// specified compaction input level.
// REQUIREMENT: "compaction_input_level" must be >= 0 and
// < "input_levels()"
FileMetaData* input(size_t compaction_input_level, int i) const {
FileMetaData* input(size_t compaction_input_level, size_t i) const {
assert(compaction_input_level < inputs_.size());
return inputs_[compaction_input_level][i];
}
@ -98,7 +98,7 @@ class Compaction {
}
// Returns the LevelFilesBrief of the specified compaction input level.
LevelFilesBrief* input_levels(int compaction_input_level) {
LevelFilesBrief* input_levels(size_t compaction_input_level) {
return &input_levels_[compaction_input_level];
}

@ -415,32 +415,33 @@ Status CompactionJob::Run() {
}
compaction_stats_.micros = env_->NowMicros() - start_micros - imm_micros;
compaction_stats_.files_in_leveln = compact_->compaction->num_input_files(0);
compaction_stats_.files_in_leveln =
static_cast<int>(compact_->compaction->num_input_files(0));
compaction_stats_.files_in_levelnp1 =
compact_->compaction->num_input_files(1);
static_cast<int>(compact_->compaction->num_input_files(1));
MeasureTime(stats_, COMPACTION_TIME, compaction_stats_.micros);
int num_output_files = compact_->outputs.size();
size_t num_output_files = compact_->outputs.size();
if (compact_->builder != nullptr) {
// An error occurred so ignore the last output.
assert(num_output_files > 0);
--num_output_files;
}
compaction_stats_.files_out_levelnp1 = num_output_files;
compaction_stats_.files_out_levelnp1 = static_cast<int>(num_output_files);
for (int i = 0; i < compact_->compaction->num_input_files(0); i++) {
for (size_t i = 0; i < compact_->compaction->num_input_files(0); i++) {
compaction_stats_.bytes_readn +=
compact_->compaction->input(0, i)->fd.GetFileSize();
compaction_stats_.num_input_records +=
static_cast<uint64_t>(compact_->compaction->input(0, i)->num_entries);
}
for (int i = 0; i < compact_->compaction->num_input_files(1); i++) {
for (size_t i = 0; i < compact_->compaction->num_input_files(1); i++) {
compaction_stats_.bytes_readnp1 +=
compact_->compaction->input(1, i)->fd.GetFileSize();
}
for (int i = 0; i < num_output_files; i++) {
for (size_t i = 0; i < num_output_files; i++) {
compaction_stats_.bytes_written += compact_->outputs[i].file_size;
}
if (compact_->num_input_records > compact_->num_output_records) {

@ -46,7 +46,7 @@ CompressionType GetCompressionType(
// If the use has specified a different compression level for each level,
// then pick the compression for that level.
if (!ioptions.compression_per_level.empty()) {
const int n = ioptions.compression_per_level.size() - 1;
const int n = static_cast<int>(ioptions.compression_per_level.size()) - 1;
// It is possible for level_ to be -1; in that case, we use level
// 0's compression. This occurs mostly in backwards compatibility
// situations when the builder doesn't know what level the file
@ -75,7 +75,7 @@ void CompactionPicker::SizeBeingCompacted(std::vector<uint64_t>& sizes) {
uint64_t total = 0;
for (auto c : compactions_in_progress_[level]) {
assert(c->level() == level);
for (int i = 0; i < c->num_input_files(0); i++) {
for (size_t i = 0; i < c->num_input_files(0); i++) {
total += c->input(0, i)->compensated_file_size;
}
}
@ -870,7 +870,8 @@ Compaction* UniversalCompactionPicker::PickCompaction(
// If max read amplification is exceeding configured limits, then force
// compaction without looking at filesize ratios and try to reduce
// the number of files to fewer than level0_file_num_compaction_trigger.
unsigned int num_files = level_files.size() -
unsigned int num_files =
static_cast<unsigned int>(level_files.size()) -
mutable_cf_options.level0_file_num_compaction_trigger;
if ((c = PickCompactionUniversalReadAmp(
cf_name, mutable_cf_options, vstorage, score, UINT_MAX,
@ -1074,8 +1075,7 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
if (ratio_to_compress >= 0) {
uint64_t total_size = vstorage->NumLevelBytes(kLevel0);
uint64_t older_file_size = 0;
for (unsigned int i = files.size() - 1;
i >= first_index_after; i--) {
for (size_t i = files.size() - 1; i >= first_index_after; i--) {
older_file_size += files[i]->fd.GetFileSize();
if (older_file_size * 100L >= total_size * (long) ratio_to_compress) {
enable_compression = false;

@ -109,7 +109,7 @@ TEST(CompactionPickerTest, Level0Trigger) {
std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
cf_name, mutable_cf_options, &vstorage, &log_buffer));
ASSERT_TRUE(compaction.get() != nullptr);
ASSERT_EQ(2, compaction->num_input_files(0));
ASSERT_EQ(2U, compaction->num_input_files(0));
ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
}
@ -121,7 +121,7 @@ TEST(CompactionPickerTest, Level1Trigger) {
std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
cf_name, mutable_cf_options, &vstorage, &log_buffer));
ASSERT_TRUE(compaction.get() != nullptr);
ASSERT_EQ(1, compaction->num_input_files(0));
ASSERT_EQ(1U, compaction->num_input_files(0));
ASSERT_EQ(66U, compaction->input(0, 0)->fd.GetNumber());
}
@ -136,8 +136,8 @@ TEST(CompactionPickerTest, Level1Trigger2) {
std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
cf_name, mutable_cf_options, &vstorage, &log_buffer));
ASSERT_TRUE(compaction.get() != nullptr);
ASSERT_EQ(1, compaction->num_input_files(0));
ASSERT_EQ(2, compaction->num_input_files(1));
ASSERT_EQ(1U, compaction->num_input_files(0));
ASSERT_EQ(2U, compaction->num_input_files(1));
ASSERT_EQ(66U, compaction->input(0, 0)->fd.GetNumber());
ASSERT_EQ(6U, compaction->input(1, 0)->fd.GetNumber());
ASSERT_EQ(7U, compaction->input(1, 1)->fd.GetNumber());
@ -164,7 +164,7 @@ TEST(CompactionPickerTest, LevelMaxScore) {
std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
cf_name, mutable_cf_options, &vstorage, &log_buffer));
ASSERT_TRUE(compaction.get() != nullptr);
ASSERT_EQ(1, compaction->num_input_files(0));
ASSERT_EQ(1U, compaction->num_input_files(0));
ASSERT_EQ(7U, compaction->input(0, 0)->fd.GetNumber());
}

@ -82,7 +82,7 @@ void DoRandomIteraratorTest(DB* db, std::vector<std::string> source_strings,
}
int type = rnd->Uniform(2);
int index = rnd->Uniform(source_strings.size());
int index = rnd->Uniform(static_cast<int>(source_strings.size()));
auto& key = source_strings[index];
switch (type) {
case 0:
@ -124,7 +124,7 @@ void DoRandomIteraratorTest(DB* db, std::vector<std::string> source_strings,
break;
case 2: {
// Seek to random key
auto key_idx = rnd->Uniform(source_strings.size());
auto key_idx = rnd->Uniform(static_cast<int>(source_strings.size()));
auto key = source_strings[key_idx];
iter->Seek(key);
result_iter->Seek(key);
@ -150,7 +150,7 @@ void DoRandomIteraratorTest(DB* db, std::vector<std::string> source_strings,
break;
default: {
assert(type == 5);
auto key_idx = rnd->Uniform(source_strings.size());
auto key_idx = rnd->Uniform(static_cast<int>(source_strings.size()));
auto key = source_strings[key_idx];
std::string result;
auto status = db->Get(ReadOptions(), key, &result);
@ -325,7 +325,7 @@ TEST(ComparatorDBTest, SimpleSuffixReverseComparator) {
source_prefixes.push_back(test::RandomHumanReadableString(&rnd, 8));
}
for (int j = 0; j < 20; j++) {
int prefix_index = rnd.Uniform(source_prefixes.size());
int prefix_index = rnd.Uniform(static_cast<int>(source_prefixes.size()));
std::string key = source_prefixes[prefix_index] +
test::RandomHumanReadableString(&rnd, rnd.Uniform(8));
source_strings.push_back(key);

@ -115,8 +115,8 @@ class CorruptionTest {
continue;
}
missed += (key - next_expected);
next_expected = key + 1;
if (iter->value() != Value(key, &value_space)) {
next_expected = static_cast<unsigned int>(key + 1);
if (iter->value() != Value(static_cast<int>(key), &value_space)) {
bad_values++;
} else {
correct++;
@ -143,14 +143,14 @@ class CorruptionTest {
if (-offset > sbuf.st_size) {
offset = 0;
} else {
offset = sbuf.st_size + offset;
offset = static_cast<int>(sbuf.st_size + offset);
}
}
if (offset > sbuf.st_size) {
offset = sbuf.st_size;
offset = static_cast<int>(sbuf.st_size);
}
if (offset + bytes_to_corrupt > sbuf.st_size) {
bytes_to_corrupt = sbuf.st_size - offset;
bytes_to_corrupt = static_cast<int>(sbuf.st_size - offset);
}
// Do it
@ -177,7 +177,7 @@ class CorruptionTest {
type == filetype &&
static_cast<int>(number) > picked_number) { // Pick latest file
fname = dbname_ + "/" + filenames[i];
picked_number = number;
picked_number = static_cast<int>(number);
}
}
ASSERT_TRUE(!fname.empty()) << filetype;
@ -246,7 +246,8 @@ TEST(CorruptionTest, RecoverWriteError) {
TEST(CorruptionTest, NewFileErrorDuringWrite) {
// Do enough writing to force minor compaction
env_.writable_file_error_ = true;
const int num = 3 + (Options().write_buffer_size / kValueSize);
const int num =
static_cast<int>(3 + (Options().write_buffer_size / kValueSize));
std::string value_storage;
Status s;
bool failed = false;

@ -92,7 +92,7 @@ class CuckooTableDBTest {
// Return spread of files per level
std::string FilesPerLevel() {
std::string result;
int last_non_zero_offset = 0;
size_t last_non_zero_offset = 0;
for (int level = 0; level < db_->NumberLevels(); level++) {
int f = NumTableFilesAtLevel(level);
char buf[100];

@ -251,7 +251,8 @@ DEFINE_int32(universal_compression_size_percent, -1,
DEFINE_int64(cache_size, -1, "Number of bytes to use as a cache of uncompressed"
"data. Negative means use default settings.");
DEFINE_int32(block_size, rocksdb::BlockBasedTableOptions().block_size,
DEFINE_int32(block_size,
static_cast<int32_t>(rocksdb::BlockBasedTableOptions().block_size),
"Number of bytes in a block.");
DEFINE_int32(block_restart_interval,
@ -2111,8 +2112,9 @@ class Benchmark {
for (uint64_t i = 0; i < num_; ++i) {
values_[i] = i;
}
std::shuffle(values_.begin(), values_.end(),
std::default_random_engine(FLAGS_seed));
std::shuffle(
values_.begin(), values_.end(),
std::default_random_engine(static_cast<unsigned int>(FLAGS_seed)));
}
}

@ -2252,7 +2252,7 @@ SuperVersion* DBImpl::InstallSuperVersion(
MaybeScheduleFlushOrCompaction();
// Update max_total_in_memory_state_
auto old_memtable_size = 0;
size_t old_memtable_size = 0;
if (old) {
old_memtable_size = old->mutable_cf_options.write_buffer_size *
old->mutable_cf_options.max_write_buffer_number;
@ -2920,7 +2920,8 @@ Status DBImpl::DelayWrite(uint64_t expiration_time) {
auto delay = write_controller_.GetDelay();
if (write_controller_.IsStopped() == false && delay > 0) {
mutex_.Unlock();
env_->SleepForMicroseconds(delay);
// hopefully we don't have to sleep more than 2 billion microseconds
env_->SleepForMicroseconds(static_cast<int>(delay));
mutex_.Lock();
}

@ -19,7 +19,7 @@
namespace rocksdb {
static uint32_t TestGetTickerCount(const Options& options,
static uint64_t TestGetTickerCount(const Options& options,
Tickers ticker_type) {
return options.statistics->getTickerCount(ticker_type);
}

@ -668,7 +668,7 @@ class DBTest {
void CreateColumnFamilies(const std::vector<std::string>& cfs,
const Options& options) {
ColumnFamilyOptions cf_opts(options);
int cfi = handles_.size();
size_t cfi = handles_.size();
handles_.resize(cfi + cfs.size());
for (auto cf : cfs) {
ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++]));
@ -933,7 +933,7 @@ class DBTest {
int num_levels =
(cf == 0) ? db_->NumberLevels() : db_->NumberLevels(handles_[1]);
std::string result;
int last_non_zero_offset = 0;
size_t last_non_zero_offset = 0;
for (int level = 0; level < num_levels; level++) {
int f = NumTableFilesAtLevel(level, cf);
char buf[100];
@ -947,7 +947,7 @@ class DBTest {
return result;
}
int CountFiles() {
size_t CountFiles() {
std::vector<std::string> files;
env_->GetChildren(dbname_, &files);
@ -956,10 +956,10 @@ class DBTest {
env_->GetChildren(last_options_.wal_dir, &logfiles);
}
return static_cast<int>(files.size() + logfiles.size());
return files.size() + logfiles.size();
}
int CountLiveFiles() {
size_t CountLiveFiles() {
std::vector<LiveFileMetaData> metadata;
db_->GetLiveFilesMetaData(&metadata);
return metadata.size();
@ -4326,7 +4326,8 @@ TEST(DBTest, RepeatedWritesToSameKey) {
options.num_levels + options.level0_stop_writes_trigger;
Random rnd(301);
std::string value = RandomString(&rnd, 2 * options.write_buffer_size);
std::string value =
RandomString(&rnd, static_cast<int>(2 * options.write_buffer_size));
for (int i = 0; i < 5 * kMaxFiles; i++) {
ASSERT_OK(Put(1, "key", value));
ASSERT_LE(TotalTableFiles(1), kMaxFiles);
@ -4657,7 +4658,7 @@ TEST(DBTest, CompactionFilterDeletesAll) {
// this will produce empty file (delete compaction filter)
ASSERT_OK(db_->CompactRange(nullptr, nullptr));
ASSERT_EQ(0, CountLiveFiles());
ASSERT_EQ(0U, CountLiveFiles());
Reopen(options);
@ -5845,7 +5846,7 @@ TEST(DBTest, DropWrites) {
ASSERT_OK(Put("foo", "v1"));
ASSERT_EQ("v1", Get("foo"));
Compact("a", "z");
const int num_files = CountFiles();
const size_t num_files = CountFiles();
// Force out-of-space errors
env_->drop_writes_.store(true, std::memory_order_release);
env_->sleep_counter_.Reset();
@ -6031,7 +6032,7 @@ TEST(DBTest, FilesDeletedAfterCompaction) {
CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
ASSERT_OK(Put(1, "foo", "v2"));
Compact(1, "a", "z");
const int num_files = CountLiveFiles();
const size_t num_files = CountLiveFiles();
for (int i = 0; i < 10; i++) {
ASSERT_OK(Put(1, "foo", "v2"));
Compact(1, "a", "z");
@ -6504,7 +6505,7 @@ TEST(DBTest, FlushOneColumnFamily) {
ASSERT_OK(Put(6, "alyosha", "alyosha"));
ASSERT_OK(Put(7, "popovich", "popovich"));
for (size_t i = 0; i < 8; ++i) {
for (int i = 0; i < 8; ++i) {
Flush(i);
auto tables = ListTableFiles(env_, dbname_);
ASSERT_EQ(tables.size(), i + 1U);
@ -6848,8 +6849,8 @@ TEST(DBTest, TransactionLogIteratorCorruptedLog) {
// than 1025 entries
auto iter = OpenTransactionLogIter(0);
int count;
int last_sequence_read = ReadRecords(iter, count);
ASSERT_LT(last_sequence_read, 1025);
SequenceNumber last_sequence_read = ReadRecords(iter, count);
ASSERT_LT(last_sequence_read, 1025U);
// Try to read past the gap, should be able to seek to key1025
auto iter2 = OpenTransactionLogIter(last_sequence_read + 1);
ExpectRecords(1, iter2);
@ -8358,7 +8359,7 @@ TEST(DBTest, CompactFilesOnLevelCompaction) {
ColumnFamilyMetaData cf_meta;
dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta);
int output_level = cf_meta.levels.size() - 1;
int output_level = static_cast<int>(cf_meta.levels.size()) - 1;
for (int file_picked = 5; file_picked > 0; --file_picked) {
std::set<std::string> overlapping_file_names;
std::vector<std::string> compaction_input_file_names;

@ -137,7 +137,8 @@ LookupKey::LookupKey(const Slice& _user_key, SequenceNumber s) {
dst = new char[needed];
}
start_ = dst;
dst = EncodeVarint32(dst, usize + 8);
// NOTE: We don't support users keys of more than 2GB :)
dst = EncodeVarint32(dst, static_cast<uint32_t>(usize + 8));
kstart_ = dst;
memcpy(dst, _user_key.data(), usize);
dst += usize;

@ -206,13 +206,19 @@ class LookupKey {
~LookupKey();
// Return a key suitable for lookup in a MemTable.
Slice memtable_key() const { return Slice(start_, end_ - start_); }
Slice memtable_key() const {
return Slice(start_, static_cast<size_t>(end_ - start_));
}
// Return an internal key (suitable for passing to an internal iterator)
Slice internal_key() const { return Slice(kstart_, end_ - kstart_); }
Slice internal_key() const {
return Slice(kstart_, static_cast<size_t>(end_ - kstart_));
}
// Return the user key
Slice user_key() const { return Slice(kstart_, end_ - kstart_ - 8); }
Slice user_key() const {
return Slice(kstart_, static_cast<size_t>(end_ - kstart_ - 8));
}
private:
// We construct a char array of the form:
@ -319,8 +325,8 @@ class IterKey {
void EncodeLengthPrefixedKey(const Slice& key) {
auto size = key.size();
EnlargeBufferIfNeeded(size + VarintLength(size));
char* ptr = EncodeVarint32(key_, size);
EnlargeBufferIfNeeded(size + static_cast<size_t>(VarintLength(size)));
char* ptr = EncodeVarint32(key_, static_cast<uint32_t>(size));
memcpy(ptr, key.data(), size);
}

@ -17,17 +17,16 @@ namespace rocksdb {
FileIndexer::FileIndexer(const Comparator* ucmp)
: num_levels_(0), ucmp_(ucmp), level_rb_(nullptr) {}
uint32_t FileIndexer::NumLevelIndex() const {
return next_level_index_.size();
}
size_t FileIndexer::NumLevelIndex() const { return next_level_index_.size(); }
uint32_t FileIndexer::LevelIndexSize(uint32_t level) const {
size_t FileIndexer::LevelIndexSize(size_t level) const {
return next_level_index_[level].num_index;
}
void FileIndexer::GetNextLevelIndex(
const uint32_t level, const uint32_t file_index, const int cmp_smallest,
const int cmp_largest, int32_t* left_bound, int32_t* right_bound) const {
void FileIndexer::GetNextLevelIndex(const size_t level, const size_t file_index,
const int cmp_smallest,
const int cmp_largest, int32_t* left_bound,
int32_t* right_bound) const {
assert(level > 0);
// Last level, no hint
@ -69,7 +68,7 @@ void FileIndexer::GetNextLevelIndex(
assert(*right_bound <= level_rb_[level + 1]);
}
void FileIndexer::UpdateIndex(Arena* arena, const uint32_t num_levels,
void FileIndexer::UpdateIndex(Arena* arena, const size_t num_levels,
std::vector<FileMetaData*>* const files) {
if (files == nullptr) {
return;
@ -90,11 +89,11 @@ void FileIndexer::UpdateIndex(Arena* arena, const uint32_t num_levels,
}
// L1 - Ln-1
for (uint32_t level = 1; level < num_levels_ - 1; ++level) {
for (size_t level = 1; level < num_levels_ - 1; ++level) {
const auto& upper_files = files[level];
const int32_t upper_size = upper_files.size();
const int32_t upper_size = static_cast<int32_t>(upper_files.size());
const auto& lower_files = files[level + 1];
level_rb_[level] = upper_files.size() - 1;
level_rb_[level] = static_cast<int32_t>(upper_files.size()) - 1;
if (upper_size == 0) {
continue;
}
@ -129,7 +128,8 @@ void FileIndexer::UpdateIndex(Arena* arena, const uint32_t num_levels,
[](IndexUnit* index, int32_t f_idx) { index->largest_rb = f_idx; });
}
level_rb_[num_levels_ - 1] = files[num_levels_ - 1].size() - 1;
level_rb_[num_levels_ - 1] =
static_cast<int32_t>(files[num_levels_ - 1].size()) - 1;
}
void FileIndexer::CalculateLB(
@ -137,8 +137,8 @@ void FileIndexer::CalculateLB(
const std::vector<FileMetaData*>& lower_files, IndexLevel* index_level,
std::function<int(const FileMetaData*, const FileMetaData*)> cmp_op,
std::function<void(IndexUnit*, int32_t)> set_index) {
const int32_t upper_size = upper_files.size();
const int32_t lower_size = lower_files.size();
const int32_t upper_size = static_cast<int32_t>(upper_files.size());
const int32_t lower_size = static_cast<int32_t>(lower_files.size());
int32_t upper_idx = 0;
int32_t lower_idx = 0;
@ -175,8 +175,8 @@ void FileIndexer::CalculateRB(
const std::vector<FileMetaData*>& lower_files, IndexLevel* index_level,
std::function<int(const FileMetaData*, const FileMetaData*)> cmp_op,
std::function<void(IndexUnit*, int32_t)> set_index) {
const int32_t upper_size = upper_files.size();
const int32_t lower_size = lower_files.size();
const int32_t upper_size = static_cast<int32_t>(upper_files.size());
const int32_t lower_size = static_cast<int32_t>(lower_files.size());
int32_t upper_idx = upper_size - 1;
int32_t lower_idx = lower_size - 1;

@ -42,19 +42,19 @@ class FileIndexer {
public:
explicit FileIndexer(const Comparator* ucmp);
uint32_t NumLevelIndex() const;
size_t NumLevelIndex() const;
uint32_t LevelIndexSize(uint32_t level) const;
size_t LevelIndexSize(size_t level) const;
// Return a file index range in the next level to search for a key based on
// smallest and largest key comparision for the current file specified by
// level and file_index. When *left_index < *right_index, both index should
// be valid and fit in the vector size.
void GetNextLevelIndex(
const uint32_t level, const uint32_t file_index, const int cmp_smallest,
const int cmp_largest, int32_t* left_bound, int32_t* right_bound) const;
void GetNextLevelIndex(const size_t level, const size_t file_index,
const int cmp_smallest, const int cmp_largest,
int32_t* left_bound, int32_t* right_bound) const;
void UpdateIndex(Arena* arena, const uint32_t num_levels,
void UpdateIndex(Arena* arena, const size_t num_levels,
std::vector<FileMetaData*>* const files);
enum {
@ -62,7 +62,7 @@ class FileIndexer {
};
private:
uint32_t num_levels_;
size_t num_levels_;
const Comparator* ucmp_;
struct IndexUnit {

@ -22,8 +22,15 @@ class IntComparator : public Comparator {
int Compare(const Slice& a, const Slice& b) const {
assert(a.size() == 8);
assert(b.size() == 8);
return *reinterpret_cast<const int64_t*>(a.data()) -
int64_t diff = *reinterpret_cast<const int64_t*>(a.data()) -
*reinterpret_cast<const int64_t*>(b.data());
if (diff < 0) {
return -1;
} else if (diff == 0) {
return 0;
} else {
return 1;
}
}
const char* Name() const {
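
The old IntComparator::Compare was one of the bugs mentioned in the summary: it returned the 64-bit difference implicitly truncated to int, which can report equality (or flip the sign) for keys that are far apart. A standalone sketch of the failure (OldCompare is a hypothetical stand-in for the removed code):

#include <cassert>
#include <cstdint>

int OldCompare(int64_t a, int64_t b) {
  return static_cast<int>(a - b);  // effectively what the removed line did
}

int main() {
  int64_t a = int64_t{1} << 32;   // a > b, but a - b == 2^32,
  int64_t b = 0;                  // which truncates to 0 in 32 bits
  assert(OldCompare(a, b) == 0);  // wrongly reports "equal"
  return 0;  // the fixed comparator returns -1/0/1 from explicit comparisons
}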

@ -150,9 +150,9 @@ Status FlushJob::WriteLevel0Table(const autovector<MemTable*>& mems,
memtables.push_back(m->NewIterator(ro, &arena));
}
{
ScopedArenaIterator iter(NewMergingIterator(&cfd_->internal_comparator(),
&memtables[0],
memtables.size(), &arena));
ScopedArenaIterator iter(
NewMergingIterator(&cfd_->internal_comparator(), &memtables[0],
static_cast<int>(memtables.size()), &arena));
Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
"[%s] Level-0 flush table #%" PRIu64 ": started",
cfd_->GetName().c_str(), meta.fd.GetNumber());

@ -264,10 +264,11 @@ void ForwardIterator::SeekInternal(const Slice& internal_key,
if (search_left_bound == search_right_bound) {
f_idx = search_left_bound;
} else if (search_left_bound < search_right_bound) {
f_idx = FindFileInRange(
level_files, internal_key, search_left_bound,
search_right_bound == FileIndexer::kLevelMaxIndex ?
level_files.size() : search_right_bound);
f_idx =
FindFileInRange(level_files, internal_key, search_left_bound,
search_right_bound == FileIndexer::kLevelMaxIndex
? static_cast<uint32_t>(level_files.size())
: search_right_bound);
} else {
// search_left_bound > search_right_bound
// There are only 2 cases this can happen:

@ -59,7 +59,7 @@ class EventListenerTest {
const ColumnFamilyOptions* options = nullptr) {
ColumnFamilyOptions cf_opts;
cf_opts = ColumnFamilyOptions(Options());
int cfi = handles_.size();
size_t cfi = handles_.size();
handles_.resize(cfi + cfs.size());
for (auto cf : cfs) {
ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++]));
@ -188,7 +188,7 @@ TEST(EventListenerTest, OnSingleDBFlushTest) {
ASSERT_OK(Put(6, "alyosha", "alyosha"));
ASSERT_OK(Put(7, "popovich", "popovich"));
for (size_t i = 1; i < 8; ++i) {
Flush(i);
Flush(static_cast<int>(i));
dbfull()->TEST_WaitForFlushMemTable();
ASSERT_EQ(listener->flushed_dbs_.size(), i);
ASSERT_EQ(listener->flushed_column_family_names_.size(), i);
@ -218,7 +218,7 @@ TEST(EventListenerTest, MultiCF) {
ASSERT_OK(Put(6, "alyosha", "alyosha"));
ASSERT_OK(Put(7, "popovich", "popovich"));
for (size_t i = 1; i < 8; ++i) {
Flush(i);
Flush(static_cast<int>(i));
ASSERT_EQ(listener->flushed_dbs_.size(), i);
ASSERT_EQ(listener->flushed_column_family_names_.size(), i);
}

@ -6,6 +6,11 @@
#include <vector>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include "util/testharness.h"
#include "util/benchharness.h"
#include "db/version_set.h"
@ -14,9 +19,9 @@
namespace rocksdb {
std::string MakeKey(unsigned int num) {
std::string MakeKey(uint64_t num) {
char buf[30];
snprintf(buf, sizeof(buf), "%016u", num);
snprintf(buf, sizeof(buf), "%016" PRIu64, num);
return std::string(buf);
}

@ -558,9 +558,9 @@ TEST(LogTest, ErrorJoinsRecords) {
ASSERT_EQ("correct", Read());
ASSERT_EQ("EOF", Read());
const unsigned int dropped = DroppedBytes();
ASSERT_LE(dropped, 2*kBlockSize + 100);
ASSERT_GE(dropped, 2*kBlockSize);
size_t dropped = DroppedBytes();
ASSERT_LE(dropped, 2 * kBlockSize + 100);
ASSERT_GE(dropped, 2 * kBlockSize);
}
TEST(LogTest, ReadStart) {

@ -188,7 +188,7 @@ KeyHandle MemTableRep::Allocate(const size_t len, char** buf) {
// into this scratch space.
const char* EncodeKey(std::string* scratch, const Slice& target) {
scratch->clear();
PutVarint32(scratch, target.size());
PutVarint32(scratch, static_cast<uint32_t>(target.size()));
scratch->append(target.data(), target.size());
return scratch->data();
}
@ -288,12 +288,12 @@ void MemTable::Add(SequenceNumber s, ValueType type,
// key bytes : char[internal_key.size()]
// value_size : varint32 of value.size()
// value bytes : char[value.size()]
size_t key_size = key.size();
size_t val_size = value.size();
size_t internal_key_size = key_size + 8;
const size_t encoded_len =
VarintLength(internal_key_size) + internal_key_size +
VarintLength(val_size) + val_size;
uint32_t key_size = static_cast<uint32_t>(key.size());
uint32_t val_size = static_cast<uint32_t>(value.size());
uint32_t internal_key_size = key_size + 8;
const uint32_t encoded_len = VarintLength(internal_key_size) +
internal_key_size + VarintLength(val_size) +
val_size;
char* buf = nullptr;
KeyHandle handle = table_->Allocate(encoded_len, &buf);
assert(buf != nullptr);
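
The entry layout described in the comment above caps key and value sizes at 32 bits by construction, since both are stored behind varint32 length prefixes, which is why switching these sizes from size_t to uint32_t is safe. A self-contained sketch of that layout (PutVarint32 here is a simplified stand-in for the helper in util/coding.h; EncodeEntry is hypothetical):

#include <cstdint>
#include <cstring>
#include <string>

// Append v in LEB128 varint32 form, low 7 bits first.
void PutVarint32(std::string* dst, uint32_t v) {
  while (v >= 128) {
    dst->push_back(static_cast<char>(v | 128));  // 7 payload bits + continuation bit
    v >>= 7;
  }
  dst->push_back(static_cast<char>(v));
}

// varint32(key_size + 8) | key | 8-byte seq/type tag | varint32(val_size) | value
std::string EncodeEntry(const std::string& key, uint64_t packed_seq_and_type,
                        const std::string& value) {
  std::string buf;
  PutVarint32(&buf, static_cast<uint32_t>(key.size()) + 8);
  buf.append(key);
  char tag[8];
  std::memcpy(tag, &packed_seq_and_type, sizeof(tag));  // fixed64, host order
  buf.append(tag, sizeof(tag));
  PutVarint32(&buf, static_cast<uint32_t>(value.size()));
  buf.append(value);
  return buf;
}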
@ -502,8 +502,8 @@ void MemTable::Update(SequenceNumber seq,
switch (static_cast<ValueType>(tag & 0xff)) {
case kTypeValue: {
Slice prev_value = GetLengthPrefixedSlice(key_ptr + key_length);
uint32_t prev_size = prev_value.size();
uint32_t new_size = value.size();
uint32_t prev_size = static_cast<uint32_t>(prev_value.size());
uint32_t new_size = static_cast<uint32_t>(value.size());
// Update value, if new value size <= previous value size
if (new_size <= prev_size ) {
@ -560,7 +560,7 @@ bool MemTable::UpdateCallback(SequenceNumber seq,
switch (static_cast<ValueType>(tag & 0xff)) {
case kTypeValue: {
Slice prev_value = GetLengthPrefixedSlice(key_ptr + key_length);
uint32_t prev_size = prev_value.size();
uint32_t prev_size = static_cast<uint32_t>(prev_value.size());
char* prev_buffer = const_cast<char*>(prev_value.data());
uint32_t new_prev_size = prev_size;

@ -23,15 +23,11 @@ using namespace std;
using namespace rocksdb;
namespace {
int numMergeOperatorCalls;
void resetNumMergeOperatorCalls() {
numMergeOperatorCalls = 0;
}
size_t num_merge_operator_calls;
void resetNumMergeOperatorCalls() { num_merge_operator_calls = 0; }
int num_partial_merge_calls;
void resetNumPartialMergeCalls() {
num_partial_merge_calls = 0;
}
size_t num_partial_merge_calls;
void resetNumPartialMergeCalls() { num_partial_merge_calls = 0; }
}
class CountMergeOperator : public AssociativeMergeOperator {
@ -45,7 +41,7 @@ class CountMergeOperator : public AssociativeMergeOperator {
const Slice& value,
std::string* new_value,
Logger* logger) const override {
++numMergeOperatorCalls;
++num_merge_operator_calls;
if (existing_value == nullptr) {
new_value->assign(value.data(), value.size());
return true;
@ -307,31 +303,31 @@ void testCounters(Counters& counters, DB* db, bool test_compaction) {
}
}
void testSuccessiveMerge(
Counters& counters, int max_num_merges, int num_merges) {
void testSuccessiveMerge(Counters& counters, size_t max_num_merges,
size_t num_merges) {
counters.assert_remove("z");
uint64_t sum = 0;
for (int i = 1; i <= num_merges; ++i) {
for (size_t i = 1; i <= num_merges; ++i) {
resetNumMergeOperatorCalls();
counters.assert_add("z", i);
sum += i;
if (i % (max_num_merges + 1) == 0) {
assert(numMergeOperatorCalls == max_num_merges + 1);
assert(num_merge_operator_calls == max_num_merges + 1);
} else {
assert(numMergeOperatorCalls == 0);
assert(num_merge_operator_calls == 0);
}
resetNumMergeOperatorCalls();
assert(counters.assert_get("z") == sum);
assert(numMergeOperatorCalls == i % (max_num_merges + 1));
assert(num_merge_operator_calls == i % (max_num_merges + 1));
}
}
void testPartialMerge(Counters* counters, DB* db, int max_merge, int min_merge,
int count) {
void testPartialMerge(Counters* counters, DB* db, size_t max_merge,
size_t min_merge, size_t count) {
FlushOptions o;
o.wait = true;
@ -339,7 +335,7 @@ void testPartialMerge(Counters* counters, DB* db, int max_merge, int min_merge,
// operands exceeds the threshold.
uint64_t tmp_sum = 0;
resetNumPartialMergeCalls();
for (int i = 1; i <= count; i++) {
for (size_t i = 1; i <= count; i++) {
counters->assert_add("b", i);
tmp_sum += i;
}
@ -348,7 +344,7 @@ void testPartialMerge(Counters* counters, DB* db, int max_merge, int min_merge,
ASSERT_EQ(tmp_sum, counters->assert_get("b"));
if (count > max_merge) {
// in this case, FullMerge should be called instead.
ASSERT_EQ(num_partial_merge_calls, 0);
ASSERT_EQ(num_partial_merge_calls, 0U);
} else {
// if count >= min_merge, then partial merge should be called once.
ASSERT_EQ((count >= min_merge), (num_partial_merge_calls == 1));
@ -358,20 +354,18 @@ void testPartialMerge(Counters* counters, DB* db, int max_merge, int min_merge,
resetNumPartialMergeCalls();
tmp_sum = 0;
db->Put(rocksdb::WriteOptions(), "c", "10");
for (int i = 1; i <= count; i++) {
for (size_t i = 1; i <= count; i++) {
counters->assert_add("c", i);
tmp_sum += i;
}
db->Flush(o);
db->CompactRange(nullptr, nullptr);
ASSERT_EQ(tmp_sum, counters->assert_get("c"));
ASSERT_EQ(num_partial_merge_calls, 0);
ASSERT_EQ(num_partial_merge_calls, 0U);
}
void testSingleBatchSuccessiveMerge(
DB* db,
int max_num_merges,
int num_merges) {
void testSingleBatchSuccessiveMerge(DB* db, size_t max_num_merges,
size_t num_merges) {
assert(num_merges > max_num_merges);
Slice key("BatchSuccessiveMerge");
@ -380,7 +374,7 @@ void testSingleBatchSuccessiveMerge(
// Create the batch
WriteBatch batch;
for (int i = 0; i < num_merges; ++i) {
for (size_t i = 0; i < num_merges; ++i) {
batch.Merge(key, merge_value_slice);
}
@ -390,8 +384,9 @@ void testSingleBatchSuccessiveMerge(
Status s = db->Write(WriteOptions(), &batch);
assert(s.ok());
}
assert(numMergeOperatorCalls ==
num_merges - (num_merges % (max_num_merges + 1)));
ASSERT_EQ(
num_merge_operator_calls,
static_cast<size_t>(num_merges - (num_merges % (max_num_merges + 1))));
// Get the value
resetNumMergeOperatorCalls();
@ -403,7 +398,8 @@ void testSingleBatchSuccessiveMerge(
assert(get_value_str.size() == sizeof(uint64_t));
uint64_t get_value = DecodeFixed64(&get_value_str[0]);
ASSERT_EQ(get_value, num_merges * merge_value);
ASSERT_EQ(numMergeOperatorCalls, (num_merges % (max_num_merges + 1)));
ASSERT_EQ(num_merge_operator_calls,
static_cast<size_t>((num_merges % (max_num_merges + 1))));
}
void runTest(int argc, const string& dbname, const bool use_ttl = false) {

@ -158,7 +158,7 @@ class PlainTableDBTest {
// Return spread of files per level
std::string FilesPerLevel() {
std::string result;
int last_non_zero_offset = 0;
size_t last_non_zero_offset = 0;
for (int level = 0; level < db_->NumberLevels(); level++) {
int f = NumTableFilesAtLevel(level);
char buf[100];

@ -29,14 +29,14 @@ using GFLAGS::ParseCommandLineFlags;
DEFINE_bool(trigger_deadlock, false,
"issue delete in range scan to trigger PrefixHashMap deadlock");
DEFINE_uint64(bucket_count, 100000, "number of buckets");
DEFINE_int32(bucket_count, 100000, "number of buckets");
DEFINE_uint64(num_locks, 10001, "number of locks");
DEFINE_bool(random_prefix, false, "randomize prefix");
DEFINE_uint64(total_prefixes, 100000, "total number of prefixes");
DEFINE_uint64(items_per_prefix, 1, "total number of values per prefix");
DEFINE_int64(write_buffer_size, 33554432, "");
DEFINE_int64(max_write_buffer_number, 2, "");
DEFINE_int64(min_write_buffer_number_to_merge, 1, "");
DEFINE_int32(max_write_buffer_number, 2, "");
DEFINE_int32(min_write_buffer_number_to_merge, 1, "");
DEFINE_int32(skiplist_height, 4, "");
DEFINE_int32(memtable_prefix_bloom_bits, 10000000, "");
DEFINE_int32(memtable_prefix_bloom_probes, 10, "");

@ -253,11 +253,10 @@ class ConcurrentTest {
// Note that generation 0 is never inserted, so it is ok if
// <*,0,*> is missing.
ASSERT_TRUE((gen(pos) == 0U) ||
(gen(pos) > (uint64_t)initial_state.Get(key(pos)))
) << "key: " << key(pos)
<< "; gen: " << gen(pos)
<< "; initgen: "
<< initial_state.Get(key(pos));
(gen(pos) > static_cast<uint64_t>(initial_state.Get(
static_cast<int>(key(pos))))))
<< "key: " << key(pos) << "; gen: " << gen(pos)
<< "; initgen: " << initial_state.Get(static_cast<int>(key(pos)));
// Advance to next key in the valid key space
if (key(pos) < key(current)) {

@ -160,7 +160,7 @@ class VersionEdit {
// Add the specified file at the specified number.
// REQUIRES: This version has not been saved (see VersionSet::SaveTo)
// REQUIRES: "smallest" and "largest" are smallest and largest keys in file
void AddFile(int level, uint64_t file, uint64_t file_path_id,
void AddFile(int level, uint64_t file, uint32_t file_path_id,
uint64_t file_size, const InternalKey& smallest,
const InternalKey& largest, const SequenceNumber& smallest_seqno,
const SequenceNumber& largest_seqno) {
@ -180,9 +180,7 @@ class VersionEdit {
}
// Number of edits
int NumEntries() {
return new_files_.size() + deleted_files_.size();
}
size_t NumEntries() { return new_files_.size() + deleted_files_.size(); }
bool IsColumnFamilyManipulation() {
return is_column_family_add_ || is_column_family_drop_;

@ -26,11 +26,12 @@ class VersionEditTest { };
TEST(VersionEditTest, EncodeDecode) {
static const uint64_t kBig = 1ull << 50;
static const uint32_t kBig32Bit = 1ull << 30;
VersionEdit edit;
for (int i = 0; i < 4; i++) {
TestEncodeDecode(edit);
edit.AddFile(3, kBig + 300 + i, kBig + 400 + i, 0,
edit.AddFile(3, kBig + 300 + i, kBig32Bit + 400 + i, 0,
InternalKey("foo", kBig + 500 + i, kTypeValue),
InternalKey("zoo", kBig + 600 + i, kTypeDeletion),
kBig + 500 + i, kBig + 600 + i);

@ -201,8 +201,8 @@ class FilePicker {
private:
unsigned int num_levels_;
unsigned int curr_level_;
int search_left_bound_;
int search_right_bound_;
int32_t search_left_bound_;
int32_t search_right_bound_;
#ifndef NDEBUG
std::vector<FileMetaData*>* files_;
#endif
@ -258,11 +258,13 @@ class FilePicker {
start_index = search_left_bound_;
} else if (search_left_bound_ < search_right_bound_) {
if (search_right_bound_ == FileIndexer::kLevelMaxIndex) {
search_right_bound_ = curr_file_level_->num_files - 1;
search_right_bound_ =
static_cast<int32_t>(curr_file_level_->num_files) - 1;
}
start_index = FindFileInRange(*internal_comparator_,
*curr_file_level_, ikey_,
search_left_bound_, search_right_bound_);
start_index =
FindFileInRange(*internal_comparator_, *curr_file_level_, ikey_,
static_cast<uint32_t>(search_left_bound_),
static_cast<uint32_t>(search_right_bound_));
} else {
// search_left_bound > search_right_bound, key does not exist in
// this level. Since no comparision is done in this level, it will
@ -315,7 +317,8 @@ Version::~Version() {
int FindFile(const InternalKeyComparator& icmp,
const LevelFilesBrief& file_level,
const Slice& key) {
return FindFileInRange(icmp, file_level, key, 0, file_level.num_files);
return FindFileInRange(icmp, file_level, key, 0,
static_cast<uint32_t>(file_level.num_files));
}
void DoGenerateLevelFilesBrief(LevelFilesBrief* file_level,
@ -412,7 +415,7 @@ class LevelFileNumIterator : public Iterator {
const LevelFilesBrief* flevel)
: icmp_(icmp),
flevel_(flevel),
index_(flevel->num_files),
index_(static_cast<uint32_t>(flevel->num_files)),
current_value_(0, 0, 0) { // Marks as invalid
}
virtual bool Valid() const {
@ -423,7 +426,9 @@ class LevelFileNumIterator : public Iterator {
}
virtual void SeekToFirst() { index_ = 0; }
virtual void SeekToLast() {
index_ = (flevel_->num_files == 0) ? 0 : flevel_->num_files - 1;
index_ = (flevel_->num_files == 0)
? 0
: static_cast<uint32_t>(flevel_->num_files) - 1;
}
virtual void Next() {
assert(Valid());
@ -432,7 +437,7 @@ class LevelFileNumIterator : public Iterator {
virtual void Prev() {
assert(Valid());
if (index_ == 0) {
index_ = flevel_->num_files; // Marks as invalid
index_ = static_cast<uint32_t>(flevel_->num_files); // Marks as invalid
} else {
index_--;
}
@ -1213,7 +1218,7 @@ void VersionStorageInfo::GetOverlappingInputs(
i = 0;
}
} else if (file_index) {
*file_index = i-1;
*file_index = static_cast<int>(i) - 1;
}
}
}
@ -1229,7 +1234,7 @@ void VersionStorageInfo::GetOverlappingInputsBinarySearch(
assert(level > 0);
int min = 0;
int mid = 0;
int max = files_[level].size() -1;
int max = static_cast<int>(files_[level].size()) - 1;
bool foundOverlap = false;
const Comparator* user_cmp = user_comparator_;
@ -2646,12 +2651,12 @@ Iterator* VersionSet::MakeInputIterator(Compaction* c) {
// Level-0 files have to be merged together. For other levels,
// we will make a concatenating iterator per level.
// TODO(opt): use concatenating iterator for level-0 if there is no overlap
const int space = (c->level() == 0 ?
c->input_levels(0)->num_files + c->num_input_levels() - 1:
c->num_input_levels());
Iterator** list = new Iterator*[space];
int num = 0;
for (int which = 0; which < c->num_input_levels(); which++) {
const size_t space = (c->level() == 0 ? c->input_levels(0)->num_files +
c->num_input_levels() - 1
: c->num_input_levels());
Iterator** list = new Iterator* [space];
size_t num = 0;
for (size_t which = 0; which < c->num_input_levels(); which++) {
if (c->input_levels(which)->num_files != 0) {
if (c->level(which) == 0) {
const LevelFilesBrief* flevel = c->input_levels(which);
@ -2673,8 +2678,9 @@ Iterator* VersionSet::MakeInputIterator(Compaction* c) {
}
}
assert(num <= space);
Iterator* result = NewMergingIterator(
&c->column_family_data()->internal_comparator(), list, num);
Iterator* result =
NewMergingIterator(&c->column_family_data()->internal_comparator(), list,
static_cast<int>(num));
delete[] list;
return result;
}
@ -2691,9 +2697,9 @@ bool VersionSet::VerifyCompactionFileConsistency(Compaction* c) {
c->column_family_data()->GetName().c_str());
}
for (int input = 0; input < c->num_input_levels(); ++input) {
for (size_t input = 0; input < c->num_input_levels(); ++input) {
int level = c->level(input);
for (int i = 0; i < c->num_input_files(input); ++i) {
for (size_t i = 0; i < c->num_input_files(input); ++i) {
uint64_t number = c->input(input, i)->fd.GetNumber();
bool found = false;
for (unsigned int j = 0; j < vstorage->files_[level].size(); j++) {

@ -194,7 +194,7 @@ class VersionStorageInfo {
// REQUIRES: This version has been saved (see VersionSet::SaveTo)
int NumLevelFiles(int level) const {
assert(finalized_);
return files_[level].size();
return static_cast<int>(files_[level].size());
}
// Return the combined file size of all files at the specified level.

@ -374,7 +374,7 @@ class MemTableInserter : public WriteBatch::Handler {
Status s = db_->Get(ropts, cf_handle, key, &prev_value);
char* prev_buffer = const_cast<char*>(prev_value.c_str());
uint32_t prev_size = prev_value.size();
uint32_t prev_size = static_cast<uint32_t>(prev_value.size());
auto status = moptions->inplace_callback(s.ok() ? prev_buffer : nullptr,
s.ok() ? &prev_size : nullptr,
value, &merged_value);

@ -20,6 +20,7 @@
#include <cstdarg>
#include <string>
#include <memory>
#include <limits>
#include <vector>
#include <stdint.h>
#include "rocksdb/status.h"
@ -476,8 +477,8 @@ class WritableFile {
if (new_last_preallocated_block > last_preallocated_block_) {
size_t num_spanned_blocks =
new_last_preallocated_block - last_preallocated_block_;
Allocate(block_size * last_preallocated_block_,
block_size * num_spanned_blocks);
Allocate(static_cast<off_t>(block_size * last_preallocated_block_),
static_cast<off_t>(block_size * num_spanned_blocks));
last_preallocated_block_ = new_last_preallocated_block;
}
}
@ -580,7 +581,8 @@ enum InfoLogLevel : unsigned char {
// An interface for writing log messages.
class Logger {
public:
enum { DO_NOT_SUPPORT_GET_LOG_FILE_SIZE = -1 };
size_t kDoNotSupportGetLogFileSize = std::numeric_limits<size_t>::max();
explicit Logger(const InfoLogLevel log_level = InfoLogLevel::INFO_LEVEL)
: log_level_(log_level) {}
virtual ~Logger();
@ -613,9 +615,7 @@ class Logger {
Logv(new_format, ap);
}
}
virtual size_t GetLogFileSize() const {
return DO_NOT_SUPPORT_GET_LOG_FILE_SIZE;
}
virtual size_t GetLogFileSize() const { return kDoNotSupportGetLogFileSize; }
// Flush to the OS buffers
virtual void Flush() {}
virtual InfoLogLevel GetInfoLogLevel() const { return log_level_; }
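
kDoNotSupportGetLogFileSize gives the sentinel the same type as the function that returns it, so callers compare size_t against size_t with no implicit conversion; the old enum was backed by int and had to be converted at every use. A sketch of the caller side (SupportsGetLogFileSize is a hypothetical helper):

#include <cstddef>
#include <limits>

const size_t kDoNotSupportGetLogFileSize = std::numeric_limits<size_t>::max();

bool SupportsGetLogFileSize(size_t reported) {
  return reported != kDoNotSupportGetLogFileSize;  // no int/size_t mixing
}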

@ -74,9 +74,8 @@ jbyteArray Java_org_rocksdb_RocksIterator_key0(
auto it = reinterpret_cast<rocksdb::Iterator*>(handle);
rocksdb::Slice key_slice = it->key();
jbyteArray jkey = env->NewByteArray(key_slice.size());
env->SetByteArrayRegion(
jkey, 0, key_slice.size(),
jbyteArray jkey = env->NewByteArray(static_cast<jsize>(key_slice.size()));
env->SetByteArrayRegion(jkey, 0, static_cast<jsize>(key_slice.size()),
reinterpret_cast<const jbyte*>(key_slice.data()));
return jkey;
}
@ -91,9 +90,9 @@ jbyteArray Java_org_rocksdb_RocksIterator_value0(
auto it = reinterpret_cast<rocksdb::Iterator*>(handle);
rocksdb::Slice value_slice = it->value();
jbyteArray jkeyValue = env->NewByteArray(value_slice.size());
env->SetByteArrayRegion(
jkeyValue, 0, value_slice.size(),
jbyteArray jkeyValue =
env->NewByteArray(static_cast<jsize>(value_slice.size()));
env->SetByteArrayRegion(jkeyValue, 0, static_cast<jsize>(value_slice.size()),
reinterpret_cast<const jbyte*>(value_slice.data()));
return jkeyValue;
}

@ -65,8 +65,8 @@ void Java_org_rocksdb_RestoreBackupableDB_restoreDBFromBackup0(JNIEnv* env,
const char* cwal_dir = env->GetStringUTFChars(jwal_dir, 0);
auto rdb = reinterpret_cast<rocksdb::RestoreBackupableDB*>(jhandle);
rocksdb::Status s =
rdb->RestoreDBFromBackup(jbackup_id, cdb_dir, cwal_dir, *opt);
rocksdb::Status s = rdb->RestoreDBFromBackup(
static_cast<rocksdb::BackupID>(jbackup_id), cdb_dir, cwal_dir, *opt);
env->ReleaseStringUTFChars(jdb_dir, cdb_dir);
env->ReleaseStringUTFChars(jwal_dir, cwal_dir);

@ -234,9 +234,9 @@ jobject Java_org_rocksdb_RocksDB_listColumnFamilies(
for (std::vector<std::string>::size_type i = 0;
i < column_family_names.size(); i++) {
jbyteArray jcf_value =
env->NewByteArray(column_family_names[i].size());
env->SetByteArrayRegion(jcf_value, 0,
column_family_names[i].size(),
env->NewByteArray(static_cast<jsize>(column_family_names[i].size()));
env->SetByteArrayRegion(
jcf_value, 0, static_cast<jsize>(column_family_names[i].size()),
reinterpret_cast<const jbyte*>(column_family_names[i].c_str()));
env->CallBooleanMethod(jvalue_list,
rocksdb::ListJni::getListAddMethodId(env), jcf_value);
@ -516,9 +516,8 @@ jbyteArray rocksdb_get_helper(
}
if (s.ok()) {
jbyteArray jret_value = env->NewByteArray(value.size());
env->SetByteArrayRegion(
jret_value, 0, value.size(),
jbyteArray jret_value = env->NewByteArray(static_cast<jsize>(value.size()));
env->SetByteArrayRegion(jret_value, 0, static_cast<jsize>(value.size()),
reinterpret_cast<const jbyte*>(value.c_str()));
return jret_value;
}
@ -712,9 +711,10 @@ jobject multi_get_helper(JNIEnv* env, jobject jdb, rocksdb::DB* db,
// insert in java list
for (std::vector<rocksdb::Status>::size_type i = 0; i != s.size(); i++) {
if (s[i].ok()) {
jbyteArray jentry_value = env->NewByteArray(values[i].size());
jbyteArray jentry_value =
env->NewByteArray(static_cast<jsize>(values[i].size()));
env->SetByteArrayRegion(
jentry_value, 0, values[i].size(),
jentry_value, 0, static_cast<jsize>(values[i].size()),
reinterpret_cast<const jbyte*>(values[i].c_str()));
env->CallBooleanMethod(
jvalue_list, rocksdb::ListJni::getListAddMethodId(env),
@ -1135,10 +1135,11 @@ jlongArray Java_org_rocksdb_RocksDB_iterators(
rocksdb::Status s = db->NewIterators(rocksdb::ReadOptions(),
cf_handles, &iterators);
if (s.ok()) {
jlongArray jLongArray = env->NewLongArray(iterators.size());
for (std::vector<rocksdb::Iterator*>::size_type i = 0;
i < iterators.size(); i++) {
env->SetLongArrayRegion(jLongArray, i, 1,
jlongArray jLongArray =
env->NewLongArray(static_cast<jsize>(iterators.size()));
for (std::vector<rocksdb::Iterator*>::size_type i = 0; i < iterators.size();
i++) {
env->SetLongArrayRegion(jLongArray, static_cast<jsize>(i), 1,
reinterpret_cast<const jlong*>(&iterators[i]));
}
return jLongArray;

@ -39,7 +39,7 @@ void Java_org_rocksdb_AbstractSlice_createNewSliceFromString(
jint Java_org_rocksdb_AbstractSlice_size0(
JNIEnv* env, jobject jobj, jlong handle) {
const rocksdb::Slice* slice = reinterpret_cast<rocksdb::Slice*>(handle);
return slice->size();
return static_cast<jint>(slice->size());
}
/*
@ -154,7 +154,7 @@ void Java_org_rocksdb_Slice_createNewSlice1(
jbyteArray Java_org_rocksdb_Slice_data0(
JNIEnv* env, jobject jobj, jlong handle) {
const rocksdb::Slice* slice = reinterpret_cast<rocksdb::Slice*>(handle);
const int len = slice->size();
const int len = static_cast<int>(slice->size());
const jbyteArray data = env->NewByteArray(len);
env->SetByteArrayRegion(data, 0, len,
reinterpret_cast<jbyte*>(const_cast<char*>(slice->data())));

@ -392,9 +392,8 @@ jbyteArray Java_org_rocksdb_WriteBatchTest_getContents(
}
delete mem->Unref();
jbyteArray jstate = env->NewByteArray(state.size());
env->SetByteArrayRegion(
jstate, 0, state.size(),
jbyteArray jstate = env->NewByteArray(static_cast<jsize>(state.size()));
env->SetByteArrayRegion(jstate, 0, static_cast<jsize>(state.size()),
reinterpret_cast<const jbyte*>(state.c_str()));
return jstate;

@ -203,13 +203,13 @@ inline bool Zlib_Compress(const CompressionOptions& opts, const char* input,
// Compress the input, and put compressed data in output.
_stream.next_in = (Bytef *)input;
_stream.avail_in = length;
_stream.avail_in = static_cast<unsigned int>(length);
// Initialize the output size.
_stream.avail_out = length;
_stream.next_out = (Bytef *)&(*output)[0];
_stream.avail_out = static_cast<unsigned int>(length);
_stream.next_out = (Bytef*)&(*output)[0];
int old_sz =0, new_sz =0, new_sz_delta =0;
size_t old_sz = 0, new_sz = 0, new_sz_delta = 0;
bool done = false;
while (!done) {
st = deflate(&_stream, Z_FINISH);
@ -221,12 +221,12 @@ inline bool Zlib_Compress(const CompressionOptions& opts, const char* input,
// No output space. Increase the output space by 20%.
// (Should we fail the compression since it expands the size?)
old_sz = output->size();
new_sz_delta = (int)(output->size() * 0.2);
new_sz_delta = static_cast<size_t>(output->size() * 0.2);
new_sz = output->size() + (new_sz_delta < 10 ? 10 : new_sz_delta);
output->resize(new_sz);
// Set more output.
_stream.next_out = (Bytef *)&(*output)[old_sz];
_stream.avail_out = new_sz - old_sz;
_stream.avail_out = static_cast<unsigned int>(new_sz - old_sz);
break;
case Z_BUF_ERROR:
default:
@ -258,18 +258,18 @@ inline char* Zlib_Uncompress(const char* input_data, size_t input_length,
}
_stream.next_in = (Bytef *)input_data;
_stream.avail_in = input_length;
_stream.avail_in = static_cast<unsigned int>(input_length);
// Assume the decompressed data size will be 5x of compressed size.
int output_len = input_length * 5;
size_t output_len = input_length * 5;
char* output = new char[output_len];
int old_sz = output_len;
size_t old_sz = output_len;
_stream.next_out = (Bytef *)output;
_stream.avail_out = output_len;
_stream.avail_out = static_cast<unsigned int>(output_len);
char* tmp = nullptr;
int output_len_delta;
size_t output_len_delta;
bool done = false;
//while(_stream.next_in != nullptr && _stream.avail_in != 0) {
@ -282,7 +282,7 @@ inline char* Zlib_Uncompress(const char* input_data, size_t input_length,
case Z_OK:
// No output space. Increase the output space by 20%.
old_sz = output_len;
output_len_delta = (int)(output_len * 0.2);
output_len_delta = static_cast<size_t>(output_len * 0.2);
output_len += output_len_delta < 10 ? 10 : output_len_delta;
tmp = new char[output_len];
memcpy(tmp, output, old_sz);
@ -291,7 +291,7 @@ inline char* Zlib_Uncompress(const char* input_data, size_t input_length,
// Set more output.
_stream.next_out = (Bytef *)(output + old_sz);
_stream.avail_out = output_len - old_sz;
_stream.avail_out = static_cast<unsigned int>(output_len - old_sz);
break;
case Z_BUF_ERROR:
default:
@ -301,7 +301,7 @@ inline char* Zlib_Uncompress(const char* input_data, size_t input_length,
}
}
*decompress_size = output_len - _stream.avail_out;
*decompress_size = static_cast<int>(output_len - _stream.avail_out);
inflateEnd(&_stream);
return output;
#endif
@ -329,14 +329,14 @@ inline bool BZip2_Compress(const CompressionOptions& opts, const char* input,
// Compress the input, and put compressed data in output.
_stream.next_in = (char *)input;
_stream.avail_in = length;
_stream.avail_in = static_cast<unsigned int>(length);
// Initialize the output size.
_stream.next_out = (char *)&(*output)[0];
_stream.avail_out = length;
_stream.avail_out = static_cast<unsigned int>(length);
int old_sz =0, new_sz =0;
while(_stream.next_in != nullptr && _stream.avail_in != 0) {
size_t old_sz = 0, new_sz = 0;
while (_stream.next_in != nullptr && _stream.avail_in != 0) {
st = BZ2_bzCompress(&_stream, BZ_FINISH);
switch (st) {
case BZ_STREAM_END:
@ -345,11 +345,11 @@ inline bool BZip2_Compress(const CompressionOptions& opts, const char* input,
// No output space. Increase the output space by 20%.
// (Should we fail the compression since it expands the size?)
old_sz = output->size();
new_sz = (int)(output->size() * 1.2);
new_sz = static_cast<size_t>(output->size() * 1.2);
output->resize(new_sz);
// Set more output.
_stream.next_out = (char *)&(*output)[old_sz];
_stream.avail_out = new_sz - old_sz;
_stream.avail_out = static_cast<unsigned int>(new_sz - old_sz);
break;
case BZ_SEQUENCE_ERROR:
default:
@ -377,15 +377,15 @@ inline char* BZip2_Uncompress(const char* input_data, size_t input_length,
}
_stream.next_in = (char *)input_data;
_stream.avail_in = input_length;
_stream.avail_in = static_cast<unsigned int>(input_length);
// Assume the decompressed data size will be 5x of compressed size.
int output_len = input_length * 5;
size_t output_len = input_length * 5;
char* output = new char[output_len];
int old_sz = output_len;
size_t old_sz = output_len;
_stream.next_out = (char *)output;
_stream.avail_out = output_len;
_stream.avail_out = static_cast<unsigned int>(output_len);
char* tmp = nullptr;
@ -397,7 +397,7 @@ inline char* BZip2_Uncompress(const char* input_data, size_t input_length,
case BZ_OK:
// No output space. Increase the output space by 20%.
old_sz = output_len;
output_len = (int)(output_len * 1.2);
output_len = static_cast<size_t>(output_len * 1.2);
tmp = new char[output_len];
memcpy(tmp, output, old_sz);
delete[] output;
@ -405,7 +405,7 @@ inline char* BZip2_Uncompress(const char* input_data, size_t input_length,
// Set more output.
_stream.next_out = (char *)(output + old_sz);
_stream.avail_out = output_len - old_sz;
_stream.avail_out = static_cast<unsigned int>(output_len - old_sz);
break;
default:
delete[] output;
@ -414,7 +414,7 @@ inline char* BZip2_Uncompress(const char* input_data, size_t input_length,
}
}
*decompress_size = output_len - _stream.avail_out;
*decompress_size = static_cast<int>(output_len - _stream.avail_out);
BZ2_bzDecompressEnd(&_stream);
return output;
#endif
@ -424,16 +424,16 @@ inline char* BZip2_Uncompress(const char* input_data, size_t input_length,
inline bool LZ4_Compress(const CompressionOptions &opts, const char *input,
size_t length, ::std::string* output) {
#ifdef LZ4
int compressBound = LZ4_compressBound(length);
output->resize(8 + compressBound);
char *p = const_cast<char *>(output->c_str());
int compressBound = LZ4_compressBound(static_cast<int>(length));
output->resize(static_cast<size_t>(8 + compressBound));
char* p = const_cast<char*>(output->c_str());
memcpy(p, &length, sizeof(length));
size_t outlen;
outlen = LZ4_compress_limitedOutput(input, p + 8, length, compressBound);
int outlen = LZ4_compress_limitedOutput(
input, p + 8, static_cast<int>(length), compressBound);
if (outlen == 0) {
return false;
}
output->resize(8 + outlen);
output->resize(static_cast<size_t>(8 + outlen));
return true;
#endif
return false;
@ -449,7 +449,8 @@ inline char* LZ4_Uncompress(const char* input_data, size_t input_length,
memcpy(&output_len, input_data, sizeof(output_len));
char *output = new char[output_len];
*decompress_size = LZ4_decompress_safe_partial(
input_data + 8, output, input_length - 8, output_len, output_len);
input_data + 8, output, static_cast<int>(input_length - 8), output_len,
output_len);
if (*decompress_size < 0) {
delete[] output;
return nullptr;
@ -462,21 +463,22 @@ inline char* LZ4_Uncompress(const char* input_data, size_t input_length,
inline bool LZ4HC_Compress(const CompressionOptions &opts, const char* input,
size_t length, ::std::string* output) {
#ifdef LZ4
int compressBound = LZ4_compressBound(length);
output->resize(8 + compressBound);
char *p = const_cast<char *>(output->c_str());
int compressBound = LZ4_compressBound(static_cast<int>(length));
output->resize(static_cast<size_t>(8 + compressBound));
char* p = const_cast<char*>(output->c_str());
memcpy(p, &length, sizeof(length));
size_t outlen;
int outlen;
#ifdef LZ4_VERSION_MAJOR // they only started defining this since r113
outlen = LZ4_compressHC2_limitedOutput(input, p + 8, length, compressBound,
opts.level);
outlen = LZ4_compressHC2_limitedOutput(input, p + 8, static_cast<int>(length),
compressBound, opts.level);
#else
outlen = LZ4_compressHC_limitedOutput(input, p + 8, length, compressBound);
outlen = LZ4_compressHC_limitedOutput(input, p + 8, static_cast<int>(length),
compressBound);
#endif
if (outlen == 0) {
return false;
}
output->resize(8 + outlen);
output->resize(static_cast<size_t>(8 + outlen));
return true;
#endif
return false;
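
The compression hunks above all narrow for the same reason: zlib's
z_stream::avail_in/avail_out fields are uInt (32-bit) and the r-series LZ4
API takes plain int lengths, so 64-bit size_t buffer sizes cannot be passed
through unchanged. The casts assume buffers under 4GB (2GB for LZ4). A
hypothetical helper (not in the diff) that states the assumption instead of
narrowing silently:

#include <cassert>
#include <cstddef>
#include <limits>

static unsigned int CheckedU32(size_t n) {
  assert(n <= std::numeric_limits<unsigned int>::max());
  return static_cast<unsigned int>(n);
}

// Usage, mirroring the hunks above:
//   _stream.avail_in = CheckedU32(length);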

@ -304,7 +304,8 @@ Block::Block(BlockContents&& contents)
if (size_ < sizeof(uint32_t)) {
size_ = 0; // Error marker
} else {
restart_offset_ = size_ - (1 + NumRestarts()) * sizeof(uint32_t);
restart_offset_ =
static_cast<uint32_t>(size_) - (1 + NumRestarts()) * sizeof(uint32_t);
if (restart_offset_ > size_ - sizeof(uint32_t)) {
// The size is too small for NumRestarts() and therefore
// restart_offset_ wrapped around.

@ -159,7 +159,8 @@ class BlockIter : public Iterator {
// Return the offset in data_ just past the end of the current entry.
inline uint32_t NextEntryOffset() const {
return (value_.data() + value_.size()) - data_;
// NOTE: We don't support files bigger than 2GB
return static_cast<uint32_t>((value_.data() + value_.size()) - data_);
}
uint32_t GetRestartPoint(uint32_t index) {
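
The uint32_t casts in these two block hunks rest on the on-disk format: a
block ends with a fixed32 restart array plus a fixed32 count, so every
in-block offset already has to fit in 32 bits, and the new comment makes the
resulting "no blocks over 2GB" limit explicit. A sketch of that trailer
(illustrative names, not the RocksDB API):

//   | entries ... | restart[0] (4B) | ... | restart[n-1] (4B) | n (4B) |

#include <cstddef>
#include <cstdint>
#include <cstring>

static uint32_t ReadNumRestarts(const char* data, size_t size) {
  uint32_t n;
  std::memcpy(&n, data + size - sizeof(n), sizeof(n));  // trailer count
  return n;  // fixed32 values in this format are little-endian
}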

@ -99,7 +99,7 @@ Slice BlockBasedFilterBlockBuilder::Finish() {
}
// Append array of per-filter offsets
const uint32_t array_offset = result_.size();
const uint32_t array_offset = static_cast<uint32_t>(result_.size());
for (size_t i = 0; i < filter_offsets_.size(); i++) {
PutFixed32(&result_, filter_offsets_[i]);
}
@ -113,7 +113,7 @@ void BlockBasedFilterBlockBuilder::GenerateFilter() {
const size_t num_entries = start_.size();
if (num_entries == 0) {
// Fast path if there are no keys for this filter
filter_offsets_.push_back(result_.size());
filter_offsets_.push_back(static_cast<uint32_t>(result_.size()));
return;
}
@ -127,8 +127,9 @@ void BlockBasedFilterBlockBuilder::GenerateFilter() {
}
// Generate filter for current set of keys and append to result_.
filter_offsets_.push_back(result_.size());
policy_->CreateFilter(&tmp_entries_[0], num_entries, &result_);
filter_offsets_.push_back(static_cast<uint32_t>(result_.size()));
policy_->CreateFilter(&tmp_entries_[0], static_cast<int>(num_entries),
&result_);
tmp_entries_.clear();
entries_.clear();

@ -203,7 +203,7 @@ class HashIndexBuilder : public IndexBuilder {
// copy.
pending_entry_prefix_ = key_prefix.ToString();
pending_block_num_ = 1;
pending_entry_index_ = current_restart_index_;
pending_entry_index_ = static_cast<uint32_t>(current_restart_index_);
} else {
// entry number increments when keys sharing the prefix reside in
// different data blocks.
@ -234,7 +234,8 @@ class HashIndexBuilder : public IndexBuilder {
void FlushPendingPrefix() {
prefix_block_.append(pending_entry_prefix_.data(),
pending_entry_prefix_.size());
PutVarint32(&prefix_meta_block_, pending_entry_prefix_.size());
PutVarint32(&prefix_meta_block_,
static_cast<uint32_t>(pending_entry_prefix_.size()));
PutVarint32(&prefix_meta_block_, pending_entry_index_);
PutVarint32(&prefix_meta_block_, pending_block_num_);
}
@ -596,7 +597,8 @@ void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents,
}
case kxxHash: {
void* xxh = XXH32_init(0);
XXH32_update(xxh, block_contents.data(), block_contents.size());
XXH32_update(xxh, block_contents.data(),
static_cast<uint32_t>(block_contents.size()));
XXH32_update(xxh, trailer, 1); // Extend to cover block type
EncodeFixed32(trailer_without_type, XXH32_digest(xxh));
break;

@ -85,7 +85,7 @@ Slice BlockBuilder::Finish() {
for (size_t i = 0; i < restarts_.size(); i++) {
PutFixed32(&buffer_, restarts_[i]);
}
PutFixed32(&buffer_, restarts_.size());
PutFixed32(&buffer_, static_cast<uint32_t>(restarts_.size()));
finished_ = true;
return Slice(buffer_);
}
@ -103,15 +103,15 @@ void BlockBuilder::Add(const Slice& key, const Slice& value) {
}
} else {
// Restart compression
restarts_.push_back(buffer_.size());
restarts_.push_back(static_cast<uint32_t>(buffer_.size()));
counter_ = 0;
}
const size_t non_shared = key.size() - shared;
// Add "<shared><non_shared><value_size>" to buffer_
PutVarint32(&buffer_, shared);
PutVarint32(&buffer_, non_shared);
PutVarint32(&buffer_, value.size());
PutVarint32(&buffer_, static_cast<uint32_t>(shared));
PutVarint32(&buffer_, static_cast<uint32_t>(non_shared));
PutVarint32(&buffer_, static_cast<uint32_t>(value.size()));
// Add string delta to buffer_ followed by value
buffer_.append(key.data() + shared, non_shared);
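
The three PutVarint32 casts above exist because shared, non_shared, and
value.size() are size_t while the block format encodes them as varint32. A
self-contained sketch of the entry layout they feed, reproducing the varint
helpers in the spirit of the util/coding.h hunks further down in this diff
(AppendEntry is a hypothetical name):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <string>

static char* EncodeVarint32(char* dst, uint32_t v) {
  unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
  while (v >= 128) {
    *(ptr++) = static_cast<unsigned char>(v | 128);  // 7 bits + flag
    v >>= 7;
  }
  *(ptr++) = static_cast<unsigned char>(v);
  return reinterpret_cast<char*>(ptr);
}

static void PutVarint32(std::string* dst, uint32_t v) {
  char buf[5];  // a varint32 needs at most 5 bytes
  char* ptr = EncodeVarint32(buf, v);
  dst->append(buf, static_cast<size_t>(ptr - buf));
}

// <shared><non_shared><value_size> as varint32, then key delta + value.
static void AppendEntry(std::string* buffer, const std::string& last_key,
                        const std::string& key, const std::string& value) {
  size_t shared = 0;  // bytes shared with the previous key
  const size_t min_len = std::min(last_key.size(), key.size());
  while (shared < min_len && last_key[shared] == key[shared]) {
    ++shared;
  }
  const size_t non_shared = key.size() - shared;
  PutVarint32(buffer, static_cast<uint32_t>(shared));
  PutVarint32(buffer, static_cast<uint32_t>(non_shared));
  PutVarint32(buffer, static_cast<uint32_t>(value.size()));
  buffer->append(key.data() + shared, non_shared);  // key suffix only
  buffer->append(value.data(), value.size());
}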

@ -59,7 +59,7 @@ BlockHashIndex* CreateBlockHashIndexOnTheFly(
auto hash_index = new BlockHashIndex(
hash_key_extractor,
true /* hash_index will copy prefix when Add() is called */);
uint64_t current_restart_index = 0;
uint32_t current_restart_index = 0;
std::string pending_entry_prefix;
// pending_block_num == 0 also implies there is no entry inserted at all.

@ -82,8 +82,8 @@ TEST(BlockTest, BasicTest) {
auto prefix_extractor = NewFixedPrefixTransform(prefix_size);
std::unique_ptr<BlockHashIndex> block_hash_index(CreateBlockHashIndexOnTheFly(
&index_iter, &data_iter, index_entries.size(), BytewiseComparator(),
prefix_extractor));
&index_iter, &data_iter, static_cast<uint32_t>(index_entries.size()),
BytewiseComparator(), prefix_extractor));
std::map<std::string, BlockHashIndex::RestartIndex> expected = {
{"01xx", BlockHashIndex::RestartIndex(0, 1)},

@ -87,7 +87,7 @@ class BlockPrefixIndex::Builder {
BlockPrefixIndex* Finish() {
// For now, use roughly 1:1 prefix to bucket ratio.
uint32_t num_buckets = prefixes_.size() + 1;
uint32_t num_buckets = static_cast<uint32_t>(prefixes_.size()) + 1;
// Collect prefix records that hash to the same bucket, into a single
// linklist.

@ -163,7 +163,7 @@ void CheckBlockContents(BlockContents contents, const int max_key,
auto iter1 = reader1.NewIterator(nullptr);
auto iter2 = reader1.NewIterator(nullptr);
reader1.SetBlockHashIndex(CreateBlockHashIndexOnTheFly(
iter1, iter2, keys.size(), BytewiseComparator(),
iter1, iter2, static_cast<uint32_t>(keys.size()), BytewiseComparator(),
prefix_extractor.get()));
delete iter1;

@ -182,7 +182,7 @@ Slice CuckooTableBuilder::GetValue(uint64_t idx) const {
Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) {
buckets->resize(hash_table_size_ + cuckoo_block_size_ - 1);
uint64_t make_space_for_key_call_id = 0;
uint32_t make_space_for_key_call_id = 0;
for (uint32_t vector_idx = 0; vector_idx < num_entries_; vector_idx++) {
uint64_t bucket_id;
bool bucket_found = false;
@ -254,7 +254,7 @@ Status CuckooTableBuilder::Finish() {
}
// Determine unused_user_key to fill empty buckets.
std::string unused_user_key = smallest_user_key_;
int curr_pos = unused_user_key.size() - 1;
int curr_pos = static_cast<int>(unused_user_key.size()) - 1;
while (curr_pos >= 0) {
--unused_user_key[curr_pos];
if (Slice(unused_user_key).compare(smallest_user_key_) < 0) {
@ -265,7 +265,7 @@ Status CuckooTableBuilder::Finish() {
if (curr_pos < 0) {
// Try using the largest key to identify an unused key.
unused_user_key = largest_user_key_;
curr_pos = unused_user_key.size() - 1;
curr_pos = static_cast<int>(unused_user_key.size()) - 1;
while (curr_pos >= 0) {
++unused_user_key[curr_pos];
if (Slice(unused_user_key).compare(largest_user_key_) > 0) {
@ -429,9 +429,8 @@ uint64_t CuckooTableBuilder::FileSize() const {
// If tree depth exceeds max depth, we return false indicating failure.
bool CuckooTableBuilder::MakeSpaceForKey(
const autovector<uint64_t>& hash_vals,
const uint64_t make_space_for_key_call_id,
std::vector<CuckooBucket>* buckets,
uint64_t* bucket_id) {
const uint32_t make_space_for_key_call_id,
std::vector<CuckooBucket>* buckets, uint64_t* bucket_id) {
struct CuckooNode {
uint64_t bucket_id;
uint32_t depth;
@ -495,7 +494,7 @@ bool CuckooTableBuilder::MakeSpaceForKey(
// child with the parent. Stop when first level is reached in the tree
// (happens when 0 <= bucket_to_replace_pos < num_hash_func_) and return
// this location in first level for target key to be inserted.
uint32_t bucket_to_replace_pos = tree.size()-1;
uint32_t bucket_to_replace_pos = static_cast<uint32_t>(tree.size()) - 1;
while (bucket_to_replace_pos >= num_hash_func_) {
CuckooNode& curr_node = tree[bucket_to_replace_pos];
(*buckets)[curr_node.bucket_id] =

@ -68,11 +68,9 @@ class CuckooTableBuilder: public TableBuilder {
};
static const uint32_t kMaxVectorIdx = std::numeric_limits<int32_t>::max();
bool MakeSpaceForKey(
const autovector<uint64_t>& hash_vals,
const uint64_t call_id,
std::vector<CuckooBucket>* buckets,
uint64_t* bucket_id);
bool MakeSpaceForKey(const autovector<uint64_t>& hash_vals,
const uint32_t call_id,
std::vector<CuckooBucket>* buckets, uint64_t* bucket_id);
Status MakeHashTable(std::vector<CuckooBucket>* buckets);
inline bool IsDeletedKey(uint64_t idx) const;

@ -87,13 +87,14 @@ class CuckooBuilderTest {
// Check contents of the bucket.
std::vector<bool> keys_found(keys.size(), false);
uint32_t bucket_size = expected_unused_bucket.size();
size_t bucket_size = expected_unused_bucket.size();
for (uint32_t i = 0; i < table_size + cuckoo_block_size - 1; ++i) {
Slice read_slice;
ASSERT_OK(read_file->Read(i*bucket_size, bucket_size,
&read_slice, nullptr));
uint32_t key_idx = std::find(expected_locations.begin(),
expected_locations.end(), i) - expected_locations.begin();
size_t key_idx =
std::find(expected_locations.begin(), expected_locations.end(), i) -
expected_locations.begin();
if (key_idx == keys.size()) {
// i is not one of the expected locations. Empty bucket.
ASSERT_EQ(read_slice.compare(expected_unused_bucket), 0);
@ -156,7 +157,7 @@ TEST(CuckooBuilderTest, WriteSuccessNoCollisionFullKey) {
for (auto& user_key : user_keys) {
keys.push_back(GetInternalKey(user_key, false));
}
uint32_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);
uint64_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);
unique_ptr<WritableFile> writable_file;
fname = test::TmpDir() + "/NoCollisionFullKey";
@ -169,7 +170,7 @@ TEST(CuckooBuilderTest, WriteSuccessNoCollisionFullKey) {
ASSERT_EQ(builder.NumEntries(), i + 1);
ASSERT_OK(builder.status());
}
uint32_t bucket_size = keys[0].size() + values[0].size();
size_t bucket_size = keys[0].size() + values[0].size();
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
ASSERT_OK(builder.Finish());
ASSERT_OK(writable_file->Close());
@ -196,7 +197,7 @@ TEST(CuckooBuilderTest, WriteSuccessWithCollisionFullKey) {
for (auto& user_key : user_keys) {
keys.push_back(GetInternalKey(user_key, false));
}
uint32_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);
uint64_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);
unique_ptr<WritableFile> writable_file;
fname = test::TmpDir() + "/WithCollisionFullKey";
@ -209,7 +210,7 @@ TEST(CuckooBuilderTest, WriteSuccessWithCollisionFullKey) {
ASSERT_EQ(builder.NumEntries(), i + 1);
ASSERT_OK(builder.status());
}
uint32_t bucket_size = keys[0].size() + values[0].size();
size_t bucket_size = keys[0].size() + values[0].size();
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
ASSERT_OK(builder.Finish());
ASSERT_OK(writable_file->Close());
@ -236,7 +237,7 @@ TEST(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) {
for (auto& user_key : user_keys) {
keys.push_back(GetInternalKey(user_key, false));
}
uint32_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);
uint64_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);
unique_ptr<WritableFile> writable_file;
uint32_t cuckoo_block_size = 2;
@ -251,7 +252,7 @@ TEST(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) {
ASSERT_EQ(builder.NumEntries(), i + 1);
ASSERT_OK(builder.status());
}
uint32_t bucket_size = keys[0].size() + values[0].size();
size_t bucket_size = keys[0].size() + values[0].size();
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
ASSERT_OK(builder.Finish());
ASSERT_OK(writable_file->Close());
@ -283,7 +284,7 @@ TEST(CuckooBuilderTest, WithCollisionPathFullKey) {
for (auto& user_key : user_keys) {
keys.push_back(GetInternalKey(user_key, false));
}
uint32_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);
uint64_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);
unique_ptr<WritableFile> writable_file;
fname = test::TmpDir() + "/WithCollisionPathFullKey";
@ -296,7 +297,7 @@ TEST(CuckooBuilderTest, WithCollisionPathFullKey) {
ASSERT_EQ(builder.NumEntries(), i + 1);
ASSERT_OK(builder.status());
}
uint32_t bucket_size = keys[0].size() + values[0].size();
size_t bucket_size = keys[0].size() + values[0].size();
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
ASSERT_OK(builder.Finish());
ASSERT_OK(writable_file->Close());
@ -325,7 +326,7 @@ TEST(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) {
for (auto& user_key : user_keys) {
keys.push_back(GetInternalKey(user_key, false));
}
uint32_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);
uint64_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);
unique_ptr<WritableFile> writable_file;
fname = test::TmpDir() + "/WithCollisionPathFullKeyAndCuckooBlock";
@ -338,7 +339,7 @@ TEST(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) {
ASSERT_EQ(builder.NumEntries(), i + 1);
ASSERT_OK(builder.status());
}
uint32_t bucket_size = keys[0].size() + values[0].size();
size_t bucket_size = keys[0].size() + values[0].size();
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
ASSERT_OK(builder.Finish());
ASSERT_OK(writable_file->Close());
@ -361,7 +362,7 @@ TEST(CuckooBuilderTest, WriteSuccessNoCollisionUserKey) {
{user_keys[3], {3, 4, 5, 6}}
};
std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
uint32_t expected_table_size = NextPowOf2(user_keys.size() / kHashTableRatio);
uint64_t expected_table_size = NextPowOf2(user_keys.size() / kHashTableRatio);
unique_ptr<WritableFile> writable_file;
fname = test::TmpDir() + "/NoCollisionUserKey";
@ -374,7 +375,7 @@ TEST(CuckooBuilderTest, WriteSuccessNoCollisionUserKey) {
ASSERT_EQ(builder.NumEntries(), i + 1);
ASSERT_OK(builder.status());
}
uint32_t bucket_size = user_keys[0].size() + values[0].size();
size_t bucket_size = user_keys[0].size() + values[0].size();
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
ASSERT_OK(builder.Finish());
ASSERT_OK(writable_file->Close());
@ -397,7 +398,7 @@ TEST(CuckooBuilderTest, WriteSuccessWithCollisionUserKey) {
{user_keys[3], {0, 1, 2, 3}},
};
std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
uint32_t expected_table_size = NextPowOf2(user_keys.size() / kHashTableRatio);
uint64_t expected_table_size = NextPowOf2(user_keys.size() / kHashTableRatio);
unique_ptr<WritableFile> writable_file;
fname = test::TmpDir() + "/WithCollisionUserKey";
@ -410,7 +411,7 @@ TEST(CuckooBuilderTest, WriteSuccessWithCollisionUserKey) {
ASSERT_EQ(builder.NumEntries(), i + 1);
ASSERT_OK(builder.status());
}
uint32_t bucket_size = user_keys[0].size() + values[0].size();
size_t bucket_size = user_keys[0].size() + values[0].size();
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
ASSERT_OK(builder.Finish());
ASSERT_OK(writable_file->Close());
@ -435,7 +436,7 @@ TEST(CuckooBuilderTest, WithCollisionPathUserKey) {
{user_keys[4], {0, 2}},
};
std::vector<uint64_t> expected_locations = {0, 1, 3, 4, 2};
uint32_t expected_table_size = NextPowOf2(user_keys.size() / kHashTableRatio);
uint64_t expected_table_size = NextPowOf2(user_keys.size() / kHashTableRatio);
unique_ptr<WritableFile> writable_file;
fname = test::TmpDir() + "/WithCollisionPathUserKey";
@ -448,7 +449,7 @@ TEST(CuckooBuilderTest, WithCollisionPathUserKey) {
ASSERT_EQ(builder.NumEntries(), i + 1);
ASSERT_OK(builder.status());
}
uint32_t bucket_size = user_keys[0].size() + values[0].size();
size_t bucket_size = user_keys[0].size() + values[0].size();
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
ASSERT_OK(builder.Finish());
ASSERT_OK(writable_file->Close());
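
Why make_space_for_key_call_id can shrink to uint32_t: as the surrounding
hunks suggest, it is bumped once per MakeSpaceForKey call and written into
visited buckets so the BFS never has to clear a visited bitmap between
calls, and the entry count is already capped by kMaxVectorIdx (int32 max).
A sketch of that tagging trick with illustrative types:

#include <cstdint>
#include <vector>

struct Bucket {
  uint32_t last_visit_id = 0;
};

static bool MarkVisited(std::vector<Bucket>* buckets, uint64_t bucket_id,
                        uint32_t call_id) {
  if ((*buckets)[bucket_id].last_visit_id == call_id) {
    return false;  // already expanded during this call
  }
  (*buckets)[bucket_id].last_visit_id = call_id;
  return true;
}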

@ -28,7 +28,7 @@ static inline uint64_t CuckooHash(
if (hash_cnt == 0 && identity_as_first_hash) {
value = (*reinterpret_cast<const int64_t*>(user_key.data()));
} else {
value = MurmurHash(user_key.data(), user_key.size(),
value = MurmurHash(user_key.data(), static_cast<int>(user_key.size()),
kCuckooMurmurSeedMultiplier * hash_cnt);
}
if (use_module_hash) {

@ -64,7 +64,7 @@ CuckooTableReader::CuckooTableReader(
}
unused_key_ = unused_key->second;
key_length_ = props->fixed_key_len;
key_length_ = static_cast<uint32_t>(props->fixed_key_len);
auto user_key_len = user_props.find(CuckooTablePropertyNames::kUserKeyLength);
if (user_key_len == user_props.end()) {
status_ = Status::Corruption("User key length not found");
@ -274,7 +274,7 @@ void CuckooTableIterator::SeekToFirst() {
void CuckooTableIterator::SeekToLast() {
InitIfNeeded();
curr_key_idx_ = sorted_bucket_ids_.size() - 1;
curr_key_idx_ = static_cast<uint32_t>(sorted_bucket_ids_.size()) - 1;
PrepareKVAtCurrIdx();
}
@ -288,7 +288,8 @@ void CuckooTableIterator::Seek(const Slice& target) {
sorted_bucket_ids_.end(),
kInvalidIndex,
seek_comparator);
curr_key_idx_ = std::distance(sorted_bucket_ids_.begin(), seek_it);
curr_key_idx_ =
static_cast<uint32_t>(std::distance(sorted_bucket_ids_.begin(), seek_it));
PrepareKVAtCurrIdx();
}
@ -327,7 +328,7 @@ void CuckooTableIterator::Next() {
void CuckooTableIterator::Prev() {
if (curr_key_idx_ == 0) {
curr_key_idx_ = sorted_bucket_ids_.size();
curr_key_idx_ = static_cast<uint32_t>(sorted_bucket_ids_.size());
}
if (!Valid()) {
curr_value_.clear();

@ -161,7 +161,7 @@ class CuckooReaderTest {
ASSERT_EQ(static_cast<uint32_t>(cnt), num_items);
it->SeekToLast();
cnt = num_items - 1;
cnt = static_cast<int>(num_items) - 1;
ASSERT_TRUE(it->Valid());
while (it->Valid()) {
ASSERT_OK(it->status());
@ -172,7 +172,7 @@ class CuckooReaderTest {
}
ASSERT_EQ(cnt, -1);
cnt = num_items / 2;
cnt = static_cast<int>(num_items) / 2;
it->Seek(keys[cnt]);
while (it->Valid()) {
ASSERT_OK(it->status());

@ -240,7 +240,7 @@ Status ReadBlock(RandomAccessFile* file, const Footer& footer,
actual = crc32c::Value(data, n + 1);
break;
case kxxHash:
actual = XXH32(data, n + 1, 0);
actual = XXH32(data, static_cast<int>(n) + 1, 0);
break;
default:
s = Status::Corruption("unknown checksum type");

@ -25,7 +25,7 @@ class TestFilterBitsBuilder : public FilterBitsBuilder {
// Generate the filter using the keys that are added
virtual Slice Finish(std::unique_ptr<const char[]>* buf) override {
uint32_t len = hash_entries_.size() * 4;
uint32_t len = static_cast<uint32_t>(hash_entries_.size()) * 4;
char* data = new char[len];
for (size_t i = 0; i < hash_entries_.size(); i++) {
EncodeFixed32(data + i * 4, hash_entries_[i]);
@ -42,7 +42,7 @@ class TestFilterBitsBuilder : public FilterBitsBuilder {
class TestFilterBitsReader : public FilterBitsReader {
public:
explicit TestFilterBitsReader(const Slice& contents)
: data_(contents.data()), len_(contents.size()) {}
: data_(contents.data()), len_(static_cast<uint32_t>(contents.size())) {}
virtual bool MayMatch(const Slice& entry) override {
uint32_t h = Hash(entry.data(), entry.size(), 1);

@ -49,9 +49,9 @@ class MergerTest {
MergerTest()
: rnd_(3), merging_iterator_(nullptr), single_iterator_(nullptr) {}
~MergerTest() = default;
std::vector<std::string> GenerateStrings(int len, int string_len) {
std::vector<std::string> GenerateStrings(size_t len, int string_len) {
std::vector<std::string> ret;
for (int i = 0; i < len; ++i) {
for (size_t i = 0; i < len; ++i) {
ret.push_back(test::RandomHumanReadableString(&rnd_, string_len));
}
return ret;
@ -119,7 +119,7 @@ class MergerTest {
}
void Generate(size_t num_iterators, size_t strings_per_iterator,
size_t letters_per_string) {
int letters_per_string) {
std::vector<Iterator*> small_iterators;
for (size_t i = 0; i < num_iterators; ++i) {
auto strings = GenerateStrings(strings_per_iterator, letters_per_string);
@ -127,8 +127,9 @@ class MergerTest {
all_keys_.insert(all_keys_.end(), strings.begin(), strings.end());
}
merging_iterator_.reset(NewMergingIterator(
BytewiseComparator(), &small_iterators[0], small_iterators.size()));
merging_iterator_.reset(
NewMergingIterator(BytewiseComparator(), &small_iterators[0],
static_cast<int>(small_iterators.size())));
single_iterator_.reset(new VectorIterator(all_keys_));
}

@ -6,8 +6,10 @@
#ifndef ROCKSDB_LITE
#include "table/plain_table_builder.h"
#include <string>
#include <assert.h>
#include <string>
#include <limits>
#include <map>
#include "rocksdb/comparator.h"
@ -133,7 +135,8 @@ void PlainTableBuilder::Add(const Slice& key, const Slice& value) {
}
// Write value
auto prev_offset = offset_;
assert(offset_ <= std::numeric_limits<uint32_t>::max());
auto prev_offset = static_cast<uint32_t>(offset_);
// Write out the key
encoder_.AppendKey(key, file_, &offset_, meta_bytes_buf,
&meta_bytes_buf_size);
@ -142,7 +145,7 @@ void PlainTableBuilder::Add(const Slice& key, const Slice& value) {
}
// Write value length
int value_size = value.size();
uint32_t value_size = static_cast<uint32_t>(value.size());
char* end_ptr =
EncodeVarint32(meta_bytes_buf + meta_bytes_buf_size, value_size);
assert(end_ptr <= meta_bytes_buf + sizeof(meta_bytes_buf));
@ -180,10 +183,11 @@ Status PlainTableBuilder::Finish() {
MetaIndexBuilder meta_index_builer;
if (store_index_in_file_ && (properties_.num_entries > 0)) {
assert(properties_.num_entries <= std::numeric_limits<uint32_t>::max());
bloom_block_.SetTotalBits(
&arena_, properties_.num_entries * bloom_bits_per_key_,
ioptions_.bloom_locality, huge_page_tlb_size_,
ioptions_.info_log);
&arena_,
static_cast<uint32_t>(properties_.num_entries) * bloom_bits_per_key_,
ioptions_.bloom_locality, huge_page_tlb_size_, ioptions_.info_log);
PutVarint32(&properties_.user_collected_properties
[PlainTablePropertyNames::kNumBloomBlocks],
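
In the bloom sizing above, the diff asserts num_entries fits in 32 bits and
then multiplies by bloom_bits_per_key_ in 32-bit arithmetic; note that the
product can still exceed 32 bits even when the count alone fits. A slightly
more defensive variant (a sketch, not the committed code) keeps the multiply
in 64 bits and narrows the product once:

#include <cassert>
#include <cstdint>
#include <limits>

static uint32_t BloomTotalBits(uint64_t num_entries, uint32_t bits_per_key) {
  const uint64_t total_bits = num_entries * bits_per_key;  // 64-bit math
  assert(total_bits <= std::numeric_limits<uint32_t>::max());
  return static_cast<uint32_t>(total_bits);
}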

@ -81,7 +81,7 @@ class PlainTableBuilder: public TableBuilder {
WritableFile* file_;
uint64_t offset_ = 0;
uint32_t bloom_bits_per_key_;
uint32_t huge_page_tlb_size_;
size_t huge_page_tlb_size_;
Status status_;
TableProperties properties_;
PlainTableKeyEncoder encoder_;

@ -3,6 +3,12 @@
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include "table/plain_table_index.h"
#include "util/coding.h"
#include "util/hash.h"
@ -24,7 +30,8 @@ Status PlainTableIndex::InitFromRawData(Slice data) {
if (!GetVarint32(&data, &num_prefixes_)) {
return Status::Corruption("Couldn't read the index size!");
}
sub_index_size_ = data.size() - index_size_ * kOffsetLen;
sub_index_size_ =
static_cast<uint32_t>(data.size()) - index_size_ * kOffsetLen;
char* index_data_begin = const_cast<char*>(data.data());
index_ = reinterpret_cast<uint32_t*>(index_data_begin);
@ -55,13 +62,15 @@ void PlainTableIndexBuilder::IndexRecordList::AddRecord(murmur_t hash,
num_records_in_current_group_ = 0;
}
auto& new_record = current_group_[num_records_in_current_group_++];
new_record.hash = hash;
// TODO(sdong) -- check if this is OK -- murmur_t is uint64_t, while we only
// use 32 bits here
new_record.hash = static_cast<uint32_t>(hash);
new_record.offset = offset;
new_record.next = nullptr;
}
void PlainTableIndexBuilder::AddKeyPrefix(Slice key_prefix_slice,
uint64_t key_offset) {
uint32_t key_offset) {
if (is_first_record_ || prev_key_prefix_ != key_prefix_slice.ToString()) {
++num_prefixes_;
if (!is_first_record_) {
@ -149,7 +158,7 @@ Slice PlainTableIndexBuilder::FillIndexes(
const std::vector<IndexRecord*>& hash_to_offsets,
const std::vector<uint32_t>& entries_per_bucket) {
Log(InfoLogLevel::DEBUG_LEVEL, ioptions_.info_log,
"Reserving %zu bytes for plain table's sub_index",
"Reserving %" PRIu32 " bytes for plain table's sub_index",
sub_index_size_);
auto total_allocate_size = GetTotalSize();
char* allocated = arena_->AllocateAligned(
@ -160,7 +169,7 @@ Slice PlainTableIndexBuilder::FillIndexes(
reinterpret_cast<uint32_t*>(EncodeVarint32(temp_ptr, num_prefixes_));
char* sub_index = reinterpret_cast<char*>(index + index_size_);
size_t sub_index_offset = 0;
uint32_t sub_index_offset = 0;
for (uint32_t i = 0; i < index_size_; i++) {
uint32_t num_keys_for_bucket = entries_per_bucket[i];
switch (num_keys_for_bucket) {
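
With sub_index_size_ now uint32_t, the old %zu specifier would no longer
match its argument, so the hunk switches to PRIu32 and adds the
__STDC_FORMAT_MACROS guard that older toolchains require before
<inttypes.h>. A minimal standalone illustration:

#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>

#include <cstdio>

int main() {
  uint32_t sub_index_size = 1 << 20;  // stand-in value
  std::printf("Reserving %" PRIu32 " bytes for plain table's sub_index\n",
              sub_index_size);
  return 0;
}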

@ -92,7 +92,7 @@ class PlainTableIndex {
private:
uint32_t index_size_;
size_t sub_index_size_;
uint32_t sub_index_size_;
uint32_t num_prefixes_;
uint32_t* index_;
@ -109,8 +109,8 @@ class PlainTableIndex {
class PlainTableIndexBuilder {
public:
PlainTableIndexBuilder(Arena* arena, const ImmutableCFOptions& ioptions,
uint32_t index_sparseness, double hash_table_ratio,
double huge_page_tlb_size)
size_t index_sparseness, double hash_table_ratio,
size_t huge_page_tlb_size)
: arena_(arena),
ioptions_(ioptions),
record_list_(kRecordsPerGroup),
@ -124,7 +124,7 @@ class PlainTableIndexBuilder {
hash_table_ratio_(hash_table_ratio),
huge_page_tlb_size_(huge_page_tlb_size) {}
void AddKeyPrefix(Slice key_prefix_slice, uint64_t key_offset);
void AddKeyPrefix(Slice key_prefix_slice, uint32_t key_offset);
Slice Finish();
@ -205,13 +205,13 @@ class PlainTableIndexBuilder {
uint32_t num_keys_per_prefix_;
uint32_t prev_key_prefix_hash_;
uint32_t index_sparseness_;
size_t index_sparseness_;
uint32_t index_size_;
size_t sub_index_size_;
uint32_t sub_index_size_;
const SliceTransform* prefix_extractor_;
double hash_table_ratio_;
double huge_page_tlb_size_;
size_t huge_page_tlb_size_;
std::string prev_key_prefix_;

@ -43,7 +43,7 @@ size_t EncodeSize(EntryType type, uint32_t key_size, char* out_buffer) {
// Return position after the size byte(s). nullptr means error
const char* DecodeSize(const char* offset, const char* limit,
EntryType* entry_type, size_t* key_size) {
EntryType* entry_type, uint32_t* key_size) {
assert(offset < limit);
*entry_type = static_cast<EntryType>(
(static_cast<unsigned char>(offset[0]) & ~kSizeInlineLimit) >> 6);
@ -73,10 +73,10 @@ Status PlainTableKeyEncoder::AppendKey(const Slice& key, WritableFile* file,
Slice key_to_write = key; // Portion of internal key to write out.
size_t user_key_size = fixed_user_key_len_;
uint32_t user_key_size = fixed_user_key_len_;
if (encoding_type_ == kPlain) {
if (fixed_user_key_len_ == kPlainTableVariableLength) {
user_key_size = key.size() - 8;
user_key_size = static_cast<uint32_t>(key.size() - 8);
// Write key length
char key_size_buf[5]; // tmp buffer for key size as varint32
char* ptr = EncodeVarint32(key_size_buf, user_key_size);
@ -93,7 +93,7 @@ Status PlainTableKeyEncoder::AppendKey(const Slice& key, WritableFile* file,
char size_bytes[12];
size_t size_bytes_pos = 0;
user_key_size = key.size() - 8;
user_key_size = static_cast<uint32_t>(key.size() - 8);
Slice prefix =
prefix_extractor_->Transform(Slice(key.data(), user_key_size));
@ -112,10 +112,11 @@ Status PlainTableKeyEncoder::AppendKey(const Slice& key, WritableFile* file,
if (key_count_for_prefix_ == 2) {
// For second key within a prefix, need to encode prefix length
size_bytes_pos +=
EncodeSize(kPrefixFromPreviousKey, pre_prefix_.GetKey().size(),
EncodeSize(kPrefixFromPreviousKey,
static_cast<uint32_t>(pre_prefix_.GetKey().size()),
size_bytes + size_bytes_pos);
}
size_t prefix_len = pre_prefix_.GetKey().size();
uint32_t prefix_len = static_cast<uint32_t>(pre_prefix_.GetKey().size());
size_bytes_pos += EncodeSize(kKeySuffix, user_key_size - prefix_len,
size_bytes + size_bytes_pos);
Status s = file->Append(Slice(size_bytes, size_bytes_pos));
@ -184,7 +185,7 @@ Status PlainTableKeyDecoder::NextPlainEncodingKey(
const char* start, const char* limit, ParsedInternalKey* parsed_key,
Slice* internal_key, size_t* bytes_read, bool* seekable) {
const char* key_ptr = start;
size_t user_key_size = 0;
uint32_t user_key_size = 0;
if (fixed_user_key_len_ != kPlainTableVariableLength) {
user_key_size = fixed_user_key_len_;
key_ptr = start;
@ -195,7 +196,7 @@ Status PlainTableKeyDecoder::NextPlainEncodingKey(
return Status::Corruption(
"Unexpected EOF when reading the next key's size");
}
user_key_size = static_cast<size_t>(tmp_size);
user_key_size = tmp_size;
*bytes_read = key_ptr - start;
}
// dummy initial value to avoid compiler complaint
@ -227,7 +228,7 @@ Status PlainTableKeyDecoder::NextPrefixEncodingKey(
bool expect_suffix = false;
do {
size_t size = 0;
uint32_t size = 0;
// dummy initial value to avoid compiler complaint
bool decoded_internal_key_valid = true;
const char* pos = DecodeSize(key_ptr, limit, &entry_type, &size);

@ -98,8 +98,8 @@ PlainTableReader::PlainTableReader(const ImmutableCFOptions& ioptions,
: internal_comparator_(icomparator),
encoding_type_(encoding_type),
full_scan_mode_(false),
data_end_offset_(table_properties->data_size),
user_key_len_(table_properties->fixed_key_len),
data_end_offset_(static_cast<uint32_t>(table_properties->data_size)),
user_key_len_(static_cast<uint32_t>(table_properties->fixed_key_len)),
prefix_extractor_(ioptions.prefix_extractor),
enable_bloom_(false),
bloom_(6, nullptr),
@ -327,7 +327,8 @@ Status PlainTableReader::PopulateIndex(TableProperties* props,
// Allocate bloom filter here for total order mode.
if (IsTotalOrderMode()) {
uint32_t num_bloom_bits =
table_properties_->num_entries * bloom_bits_per_key;
static_cast<uint32_t>(table_properties_->num_entries) *
bloom_bits_per_key;
if (num_bloom_bits > 0) {
enable_bloom_ = true;
bloom_.SetTotalBits(&arena_, num_bloom_bits, ioptions_.bloom_locality,
@ -350,7 +351,7 @@ Status PlainTableReader::PopulateIndex(TableProperties* props,
bloom_.SetRawData(
const_cast<unsigned char*>(
reinterpret_cast<const unsigned char*>(bloom_block->data())),
bloom_block->size() * 8, num_blocks);
static_cast<uint32_t>(bloom_block->size()) * 8, num_blocks);
}
PlainTableIndexBuilder index_builder(&arena_, ioptions_, index_sparseness,
@ -509,7 +510,7 @@ Status PlainTableReader::Next(PlainTableKeyDecoder* decoder, uint32_t* offset,
return Status::Corruption(
"Unexpected EOF when reading the next value's size.");
}
*offset = *offset + (value_ptr - start) + value_size;
*offset = *offset + static_cast<uint32_t>(value_ptr - start) + value_size;
if (*offset > data_end_offset_) {
return Status::Corruption("Unexpected EOF when reading the next value. ");
}

@ -123,7 +123,7 @@ class PlainTableReader: public TableReader {
// sst file that stores data.
const uint32_t data_start_offset_ = 0;
const uint32_t data_end_offset_;
const size_t user_key_len_;
const uint32_t user_key_len_;
const SliceTransform* prefix_extractor_;
static const size_t kNumInternalBytes = 8;
@ -135,7 +135,7 @@ class PlainTableReader: public TableReader {
const ImmutableCFOptions& ioptions_;
unique_ptr<RandomAccessFile> file_;
uint32_t file_size_;
uint64_t file_size_;
std::shared_ptr<const TableProperties> table_properties_;
bool IsFixedLength() const {

@ -947,7 +947,7 @@ class Harness {
if (keys.empty()) {
return "foo";
} else {
const int index = rnd->Uniform(keys.size());
const int index = rnd->Uniform(static_cast<int>(keys.size()));
std::string result = keys[index];
switch (rnd->Uniform(support_prev_ ? 3 : 1)) {
case 0:

@ -31,7 +31,7 @@ BlobStore* bs;
namespace {
std::string RandomString(Random* rnd, uint64_t len) {
std::string r;
test::RandomString(rnd, len, &r);
test::RandomString(rnd, static_cast<int>(len), &r);
return r;
}
} // namespace

@ -113,7 +113,8 @@ DEFINE_bool(verbose, false, "Verbose");
DEFINE_bool(progress_reports, true,
"If true, db_stress will report number of finished operations");
DEFINE_int32(write_buffer_size, rocksdb::Options().write_buffer_size,
DEFINE_int32(write_buffer_size,
static_cast<int32_t>(rocksdb::Options().write_buffer_size),
"Number of bytes to buffer in memtable before compacting");
DEFINE_int32(max_write_buffer_number,
@ -154,7 +155,8 @@ DEFINE_int32(level0_stop_writes_trigger,
rocksdb::Options().level0_stop_writes_trigger,
"Number of files in level-0 that will trigger put stop.");
DEFINE_int32(block_size, rocksdb::BlockBasedTableOptions().block_size,
DEFINE_int32(block_size,
static_cast<int32_t>(rocksdb::BlockBasedTableOptions().block_size),
"Number of bytes in a block.");
DEFINE_int32(max_background_compactions,
@ -573,9 +575,9 @@ class SharedState {
explicit SharedState(StressTest* stress_test)
: cv_(&mu_),
seed_(FLAGS_seed),
seed_(static_cast<uint32_t>(FLAGS_seed)),
max_key_(FLAGS_max_key),
log2_keys_per_lock_(FLAGS_log2_keys_per_lock),
log2_keys_per_lock_(static_cast<uint32_t>(FLAGS_log2_keys_per_lock)),
num_threads_(FLAGS_threads),
num_initialized_(0),
num_populated_(0),
@ -1451,7 +1453,7 @@ class StressTest {
assert(count <=
(static_cast<int64_t>(1) << ((8 - FLAGS_prefix_size) * 8)));
if (iter->status().ok()) {
thread->stats.AddPrefixes(1, count);
thread->stats.AddPrefixes(1, static_cast<int>(count));
} else {
thread->stats.AddErrors(1);
}
@ -1489,7 +1491,8 @@ class StressTest {
} else {
MultiPut(thread, write_opts, column_family, key, v, sz);
}
PrintKeyValue(rand_column_family, rand_key, value, sz);
PrintKeyValue(rand_column_family, static_cast<uint32_t>(rand_key),
value, sz);
} else if (writeBound <= prob_op && prob_op < delBound) {
// OPERATION delete
if (!FLAGS_test_batches_snapshots) {
@ -1553,16 +1556,19 @@ class StressTest {
from_db = iter->value().ToString();
iter->Next();
} else if (iter->key().compare(k) < 0) {
VerificationAbort(shared, "An out of range key was found", cf, i);
VerificationAbort(shared, "An out of range key was found",
static_cast<int>(cf), i);
}
} else {
// The iterator found no value for the key in question, so do not
// move to the next item in the iterator
s = Status::NotFound(Slice());
}
VerifyValue(cf, i, options, shared, from_db, s, true);
VerifyValue(static_cast<int>(cf), i, options, shared, from_db, s,
true);
if (from_db.length()) {
PrintKeyValue(cf, i, from_db.data(), from_db.length());
PrintKeyValue(static_cast<int>(cf), static_cast<uint32_t>(i),
from_db.data(), from_db.length());
}
}
} else {
@ -1575,9 +1581,11 @@ class StressTest {
std::string keystr = Key(i);
Slice k = keystr;
Status s = db_->Get(options, column_families_[cf], k, &from_db);
VerifyValue(cf, i, options, shared, from_db, s, true);
VerifyValue(static_cast<int>(cf), i, options, shared, from_db, s,
true);
if (from_db.length()) {
PrintKeyValue(cf, i, from_db.data(), from_db.length());
PrintKeyValue(static_cast<int>(cf), static_cast<uint32_t>(i),
from_db.data(), from_db.length());
}
}
}
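
The DEFINE_int32 changes at the top of this file are needed because a gflags
default must already have the flag's 32-bit type, while
Options().write_buffer_size and BlockBasedTableOptions().block_size are
size_t. A sketch of the pattern, assuming gflags is on the include path and
with kDefaultBufferSize standing in for the Options default:

#include <cstddef>
#include <cstdint>
#include <gflags/gflags.h>

static const size_t kDefaultBufferSize = 4 << 20;  // stand-in value

DEFINE_int32(write_buffer_size,
             static_cast<int32_t>(kDefaultBufferSize),
             "Number of bytes to buffer in memtable before compacting");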

@ -18,8 +18,7 @@ Status AutoRollLogger::ResetLogger() {
return status_;
}
if (logger_->GetLogFileSize() ==
(size_t)Logger::DO_NOT_SUPPORT_GET_LOG_FILE_SIZE) {
if (logger_->GetLogFileSize() == Logger::kDoNotSupportGetLogFileSize) {
status_ = Status::NotSupported(
"The underlying logger doesn't support GetLogFileSize()");
}

@ -122,7 +122,7 @@ uint64_t AutoRollLoggerTest::RollLogFileByTimeTest(
}
// -- Make the log file expire
sleep(time);
sleep(static_cast<unsigned int>(time));
LogMessage(logger, log_message.c_str());
// At this time, the new log file should be created.

@ -206,7 +206,8 @@ static double RunBenchmarkGetNSPerIteration(const BenchmarkFun& fun,
size_t actualEpochs = 0;
for (; actualEpochs < epochs; ++actualEpochs) {
for (unsigned int n = FLAGS_bm_min_iters; n < (1UL << 30); n *= 2) {
for (unsigned int n = static_cast<unsigned int>(FLAGS_bm_min_iters);
n < (1UL << 30); n *= 2) {
auto const nsecs = fun(n);
if (nsecs < minNanoseconds) {
continue;

@ -10,35 +10,35 @@
namespace rocksdb {
BENCHMARK(insertFrontVector) {
std::vector<int> v;
for (int i = 0; i < 100; i++) {
std::vector<size_t> v;
for (size_t i = 0; i < 100; i++) {
v.insert(v.begin(), i);
}
}
BENCHMARK_RELATIVE(insertBackVector) {
std::vector<int> v;
std::vector<size_t> v;
for (size_t i = 0; i < 100; i++) {
v.insert(v.end(), i);
}
}
BENCHMARK_N(insertFrontVector_n, n) {
std::vector<int> v;
std::vector<size_t> v;
for (size_t i = 0; i < n; i++) {
v.insert(v.begin(), i);
}
}
BENCHMARK_RELATIVE_N(insertBackVector_n, n) {
std::vector<int> v;
std::vector<size_t> v;
for (size_t i = 0; i < n; i++) {
v.insert(v.end(), i);
}
}
BENCHMARK_N(insertFrontEnd_n, n) {
std::vector<int> v;
std::vector<size_t> v;
for (size_t i = 0; i < n; i++) {
v.insert(v.begin(), i);
}
@ -48,7 +48,7 @@ BENCHMARK_N(insertFrontEnd_n, n) {
}
BENCHMARK_RELATIVE_N(insertFrontEndSuspend_n, n) {
std::vector<int> v;
std::vector<size_t> v;
for (size_t i = 0; i < n; i++) {
v.insert(v.begin(), i);
}

@ -132,7 +132,9 @@ BlobStore::~BlobStore() {
Status BlobStore::Put(const Slice& value, Blob* blob) {
// convert size to number of blocks
Status s = Allocate((value.size() + block_size_ - 1) / block_size_, blob);
Status s = Allocate(
static_cast<uint32_t>((value.size() + block_size_ - 1) / block_size_),
blob);
if (!s.ok()) {
return s;
}

@ -55,7 +55,8 @@ class FullFilterBitsBuilder : public FilterBitsBuilder {
// +----------------------------------------------------------------+
virtual Slice Finish(std::unique_ptr<const char[]>* buf) override {
uint32_t total_bits, num_lines;
char* data = ReserveSpace(hash_entries_.size(), &total_bits, &num_lines);
char* data = ReserveSpace(static_cast<int>(hash_entries_.size()),
&total_bits, &num_lines);
assert(data);
if (total_bits != 0 && num_lines != 0) {
@ -111,7 +112,7 @@ char* FullFilterBitsBuilder::ReserveSpace(const int num_entry,
assert(bits_per_key_);
char* data = nullptr;
if (num_entry != 0) {
uint32_t total_bits_tmp = num_entry * bits_per_key_;
uint32_t total_bits_tmp = num_entry * static_cast<uint32_t>(bits_per_key_);
*total_bits = GetTotalBitsForLocality(total_bits_tmp);
*num_lines = *total_bits / (CACHE_LINE_SIZE * 8);
@ -152,8 +153,9 @@ class FullFilterBitsReader : public FilterBitsReader {
public:
explicit FullFilterBitsReader(const Slice& contents)
: data_(const_cast<char*>(contents.data())),
data_len_(contents.size()),
num_probes_(0), num_lines_(0) {
data_len_(static_cast<uint32_t>(contents.size())),
num_probes_(0),
num_lines_(0) {
assert(data_);
GetFilterMeta(contents, &num_probes_, &num_lines_);
// Sanitize broken parameter
@ -210,7 +212,7 @@ class FullFilterBitsReader : public FilterBitsReader {
void FullFilterBitsReader::GetFilterMeta(const Slice& filter,
size_t* num_probes, uint32_t* num_lines) {
uint32_t len = filter.size();
uint32_t len = static_cast<uint32_t>(filter.size());
if (len <= 5) {
// filter is empty or broken
*num_probes = 0;
@ -225,7 +227,7 @@ void FullFilterBitsReader::GetFilterMeta(const Slice& filter,
bool FullFilterBitsReader::HashMayMatch(const uint32_t& hash,
const Slice& filter, const size_t& num_probes,
const uint32_t& num_lines) {
uint32_t len = filter.size();
uint32_t len = static_cast<uint32_t>(filter.size());
if (len <= 5) return false; // remain the same with original filter
// It is ensured the params are valid before calling it

@ -79,7 +79,8 @@ class BloomTest {
key_slices.push_back(Slice(keys_[i]));
}
filter_.clear();
policy_->CreateFilter(&key_slices[0], key_slices.size(), &filter_);
policy_->CreateFilter(&key_slices[0], static_cast<int>(key_slices.size()),
&filter_);
keys_.clear();
if (kVerbose >= 2) DumpFilter();
}

@ -145,7 +145,7 @@ class LRUCache {
// Separate from constructor so caller can easily make an array of LRUCache
void SetCapacity(size_t capacity) { capacity_ = capacity; }
void SetRemoveScanCountLimit(size_t remove_scan_count_limit) {
void SetRemoveScanCountLimit(uint32_t remove_scan_count_limit) {
remove_scan_count_limit_ = remove_scan_count_limit;
}

@ -28,7 +28,9 @@ static int DecodeKey(const Slice& k) {
return DecodeFixed32(k.data());
}
static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); }
static int DecodeValue(void* v) {
return static_cast<int>(reinterpret_cast<uintptr_t>(v));
}
class CacheTest {
public:

@ -157,7 +157,7 @@ inline void PutFixed64(std::string* dst, uint64_t value) {
inline void PutVarint32(std::string* dst, uint32_t v) {
char buf[5];
char* ptr = EncodeVarint32(buf, v);
dst->append(buf, ptr - buf);
dst->append(buf, static_cast<size_t>(ptr - buf));
}
inline char* EncodeVarint64(char* dst, uint64_t v) {
@ -174,11 +174,11 @@ inline char* EncodeVarint64(char* dst, uint64_t v) {
inline void PutVarint64(std::string* dst, uint64_t v) {
char buf[10];
char* ptr = EncodeVarint64(buf, v);
dst->append(buf, ptr - buf);
dst->append(buf, static_cast<size_t>(ptr - buf));
}
inline void PutLengthPrefixedSlice(std::string* dst, const Slice& value) {
PutVarint32(dst, value.size());
PutVarint32(dst, static_cast<uint32_t>(value.size()));
dst->append(value.data(), value.size());
}
@ -219,7 +219,7 @@ inline bool GetVarint32(Slice* input, uint32_t* value) {
if (q == nullptr) {
return false;
} else {
*input = Slice(q, limit - q);
*input = Slice(q, static_cast<size_t>(limit - q));
return true;
}
}
@ -231,7 +231,7 @@ inline bool GetVarint64(Slice* input, uint64_t* value) {
if (q == nullptr) {
return false;
} else {
*input = Slice(q, limit - q);
*input = Slice(q, static_cast<size_t>(limit - q));
return true;
}
}
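
The static_cast<size_t>(ptr - buf) casts above are there because pointer
subtraction yields a signed ptrdiff_t (64-bit on LP64) while string::append
and the Slice constructor take size_t. A hypothetical helper that makes the
non-negativity invariant explicit:

#include <cassert>
#include <cstddef>

static size_t ByteSpan(const char* begin, const char* end) {
  assert(end >= begin);  // callers always pass a forward range
  return static_cast<size_t>(end - begin);
}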

@ -298,14 +298,14 @@ static inline uint64_t LE_LOAD64(const uint8_t *p) {
#endif
static inline void Slow_CRC32(uint64_t* l, uint8_t const **p) {
uint32_t c = *l ^ LE_LOAD32(*p);
uint32_t c = static_cast<uint32_t>(*l ^ LE_LOAD32(*p));
*p += 4;
*l = table3_[c & 0xff] ^
table2_[(c >> 8) & 0xff] ^
table1_[(c >> 16) & 0xff] ^
table0_[c >> 24];
// Do it twice.
c = *l ^ LE_LOAD32(*p);
c = static_cast<uint32_t>(*l ^ LE_LOAD32(*p));
*p += 4;
*l = table3_[c & 0xff] ^
table2_[(c >> 8) & 0xff] ^
@ -362,7 +362,7 @@ uint32_t ExtendImpl(uint32_t crc, const char* buf, size_t size) {
}
#undef STEP1
#undef ALIGN
return l ^ 0xffffffffu;
return static_cast<uint32_t>(l ^ 0xffffffffu);
}
// Detect if SS42 or not.

@ -153,15 +153,15 @@ TEST(DynamicBloomTest, perf) {
return;
}
for (uint64_t m = 1; m <= 8; ++m) {
for (uint32_t m = 1; m <= 8; ++m) {
Arena arena;
const uint64_t num_keys = m * 8 * 1024 * 1024;
fprintf(stderr, "testing %" PRIu64 "M keys\n", m * 8);
const uint32_t num_keys = m * 8 * 1024 * 1024;
fprintf(stderr, "testing %" PRIu32 "M keys\n", m * 8);
DynamicBloom std_bloom(&arena, num_keys * 10, 0, num_probes);
timer.Start();
for (uint64_t i = 1; i <= num_keys; ++i) {
for (uint32_t i = 1; i <= num_keys; ++i) {
std_bloom.Add(Slice(reinterpret_cast<const char*>(&i), 8));
}
@ -169,9 +169,9 @@ TEST(DynamicBloomTest, perf) {
fprintf(stderr, "standard bloom, avg add latency %" PRIu64 "\n",
elapsed / num_keys);
uint64_t count = 0;
uint32_t count = 0;
timer.Start();
for (uint64_t i = 1; i <= num_keys; ++i) {
for (uint32_t i = 1; i <= num_keys; ++i) {
if (std_bloom.MayContain(Slice(reinterpret_cast<const char*>(&i), 8))) {
++count;
}
@ -185,7 +185,7 @@ TEST(DynamicBloomTest, perf) {
DynamicBloom blocked_bloom(&arena, num_keys * 10, 1, num_probes);
timer.Start();
for (uint64_t i = 1; i <= num_keys; ++i) {
for (uint32_t i = 1; i <= num_keys; ++i) {
blocked_bloom.Add(Slice(reinterpret_cast<const char*>(&i), 8));
}
@ -196,7 +196,7 @@ TEST(DynamicBloomTest, perf) {
count = 0;
timer.Start();
for (uint64_t i = 1; i <= num_keys; ++i) {
for (uint32_t i = 1; i <= num_keys; ++i) {
if (blocked_bloom.MayContain(
Slice(reinterpret_cast<const char*>(&i), 8))) {
++count;

@ -1594,7 +1594,8 @@ class PosixEnv : public Env {
void (*function)(void*) = queue_.front().function;
void* arg = queue_.front().arg;
queue_.pop_front();
queue_len_.store(queue_.size(), std::memory_order_relaxed);
queue_len_.store(static_cast<unsigned int>(queue_.size()),
std::memory_order_relaxed);
bool decrease_io_priority = (low_io_priority != low_io_priority_);
PthreadCall("unlock", pthread_mutex_unlock(&mu_));
@ -1709,7 +1710,8 @@ class PosixEnv : public Env {
queue_.push_back(BGItem());
queue_.back().function = function;
queue_.back().arg = arg;
queue_len_.store(queue_.size(), std::memory_order_relaxed);
queue_len_.store(static_cast<unsigned int>(queue_.size()),
std::memory_order_relaxed);
if (!HasExcessiveThread()) {
// Wake up at least one waiting thread.

@ -18,7 +18,7 @@ uint32_t Hash(const char* data, size_t n, uint32_t seed) {
const uint32_t m = 0xc6a4a793;
const uint32_t r = 24;
const char* limit = data + n;
uint32_t h = seed ^ (n * m);
uint32_t h = static_cast<uint32_t>(seed ^ (n * m));
// Pick up four bytes at a time
while (data + 4 <= limit) {

@ -213,9 +213,10 @@ class HashCuckooRep : public MemTableRep {
static const int kMurmurHashSeeds[HashCuckooRepFactory::kMaxHashCount] = {
545609244, 1769731426, 763324157, 13099088, 592422103,
1899789565, 248369300, 1984183468, 1613664382, 1491157517};
return MurmurHash(slice.data(), slice.size(),
return static_cast<unsigned int>(
MurmurHash(slice.data(), static_cast<int>(slice.size()),
kMurmurHashSeeds[hash_func_id]) %
bucket_count_;
bucket_count_);
}
// A cuckoo path is a sequence of bucket ids, where each id points to a

@ -200,7 +200,8 @@ class HashLinkListRep : public MemTableRep {
}
size_t GetHash(const Slice& slice) const {
return MurmurHash(slice.data(), slice.size(), 0) % bucket_size_;
return MurmurHash(slice.data(), static_cast<int>(slice.size()), 0) %
bucket_size_;
}
Pointer* GetBucket(size_t i) const {

@ -65,7 +65,8 @@ class HashSkipListRep : public MemTableRep {
Arena* const arena_;
inline size_t GetHash(const Slice& slice) const {
return MurmurHash(slice.data(), slice.size(), 0) % bucket_size_;
return MurmurHash(slice.data(), static_cast<int>(slice.size()), 0) %
bucket_size_;
}
inline Bucket* GetBucket(size_t i) const {
return buckets_[i].load(std::memory_order_acquire);
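
These three memtable hunks repeat the same cast because the bundled
MurmurHash takes its length as a plain int. A hedged wrapper that would
centralize it (assuming the murmurhash.h signature and include paths in this
tree; HashSlice is a hypothetical name):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>

#include "rocksdb/slice.h"
#include "util/murmurhash.h"

inline uint64_t HashSlice(const rocksdb::Slice& slice, unsigned int seed) {
  assert(slice.size() <=
         static_cast<size_t>(std::numeric_limits<int>::max()));
  return MurmurHash(slice.data(), static_cast<int>(slice.size()), seed);
}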

@ -971,8 +971,9 @@ void DBDumperCommand::DoCommand() {
uint64_t s1=0,s2=0;
// At this point, bucket_size=0 => time_range=0
uint64_t num_buckets = (bucket_size >= time_range) ? 1 :
((time_range + bucket_size - 1) / bucket_size);
int num_buckets = (bucket_size >= time_range)
? 1
: ((time_range + bucket_size - 1) / bucket_size);
vector<uint64_t> bucket_counts(num_buckets, 0);
if (is_db_ttl_ && !count_only_ && timestamp_ && !count_delim_) {
fprintf(stdout, "Dumping key-values from %s to %s\n",

@ -19,9 +19,14 @@ namespace rocksdb {
class MemFile {
public:
explicit MemFile(const std::string& fn) :
fn_(fn), refs_(0), size_(0), modified_time_(Now()),
rnd_((uint32_t)MurmurHash(fn.data(), fn.size(), 0)), fsynced_bytes_(0) {}
explicit MemFile(const std::string& fn)
: fn_(fn),
refs_(0),
size_(0),
modified_time_(Now()),
rnd_(static_cast<uint32_t>(
MurmurHash(fn.data(), static_cast<int>(fn.size()), 0))),
fsynced_bytes_(0) {}
void Ref() {
MutexLock lock(&mutex_);
@ -61,7 +66,8 @@ class MemFile {
return;
}
uint64_t buffered_bytes = size_ - fsynced_bytes_;
uint64_t start = fsynced_bytes_ + rnd_.Uniform(buffered_bytes);
uint64_t start =
fsynced_bytes_ + rnd_.Uniform(static_cast<int>(buffered_bytes));
uint64_t end = std::min(start + 512, size_.load());
MutexLock lock(&mutex_);
for (uint64_t pos = start; pos < end; ++pos) {

@ -36,7 +36,7 @@ typedef unsigned int murmur_t;
namespace rocksdb {
struct murmur_hash {
size_t operator()(const Slice& slice) const {
return MurmurHash(slice.data(), slice.size(), 0);
return MurmurHash(slice.data(), static_cast<int>(slice.size()), 0);
}
};
} // rocksdb

@ -110,7 +110,7 @@ void MutableCFOptions::Dump(Logger* log) const {
expanded_compaction_factor);
Log(log, " source_compaction_factor: %d",
source_compaction_factor);
Log(log, " target_file_size_base: %d",
Log(log, " target_file_size_base: %" PRIu64,
target_file_size_base);
Log(log, " target_file_size_multiplier: %d",
target_file_size_multiplier);

@ -108,7 +108,7 @@ struct MutableCFOptions {
int max_grandparent_overlap_factor;
int expanded_compaction_factor;
int source_compaction_factor;
int target_file_size_base;
uint64_t target_file_size_base;
int target_file_size_multiplier;
uint64_t max_bytes_for_level_base;
int max_bytes_for_level_multiplier;

@ -95,7 +95,7 @@ void PickWriteBufferSize(size_t total_write_buffer_limit, Options* options) {
options->write_buffer_size = write_buffer_size;
options->max_write_buffer_number =
total_write_buffer_limit / write_buffer_size;
static_cast<int>(total_write_buffer_limit / write_buffer_size);
options->min_write_buffer_number_to_merge = 1;
}
@ -147,10 +147,10 @@ void OptimizeForLevel(int read_amplification_threshold,
// This doesn't consider compaction and overheads of mem tables. But usually
// it is in the same order of magnitude.
int expected_level0_compaction_size =
size_t expected_level0_compaction_size =
options->level0_file_num_compaction_trigger * options->write_buffer_size;
// Enlarge level1 target file size if level0 compaction size is larger.
int max_bytes_for_level_base = 10 * kBytesForOneMb;
uint64_t max_bytes_for_level_base = 10 * kBytesForOneMb;
if (expected_level0_compaction_size > max_bytes_for_level_base) {
max_bytes_for_level_base = expected_level0_compaction_size;
}
@ -160,7 +160,7 @@ void OptimizeForLevel(int read_amplification_threshold,
const int kMinFileSize = 2 * kBytesForOneMb;
// Allow at least 3-way parallelism for compaction between level 1 and 2.
int max_file_size = max_bytes_for_level_base / 3;
uint64_t max_file_size = max_bytes_for_level_base / 3;
if (max_file_size < kMinFileSize) {
options->target_file_size_base = kMinFileSize;
} else {

@ -40,12 +40,10 @@ bool ParseBoolean(const std::string& type, const std::string& value) {
throw type;
}
}
uint32_t ParseInt(const std::string& value) {
return std::stoi(value);
}
int ParseInt(const std::string& value) { return std::stoi(value); }
uint32_t ParseUint32(const std::string& value) {
return std::stoul(value);
return static_cast<uint32_t>(std::stoul(value));
}
uint64_t ParseUint64(const std::string& value) {
@ -82,9 +80,9 @@ bool ParseMemtableOptions(const std::string& name, const std::string& value,
} else if (name == "arena_block_size") {
new_options->arena_block_size = ParseInt64(value);
} else if (name == "memtable_prefix_bloom_bits") {
new_options->memtable_prefix_bloom_bits = stoul(value);
new_options->memtable_prefix_bloom_bits = ParseUint32(value);
} else if (name == "memtable_prefix_bloom_probes") {
new_options->memtable_prefix_bloom_probes = stoul(value);
new_options->memtable_prefix_bloom_probes = ParseUint32(value);
} else if (name == "memtable_prefix_bloom_huge_page_tlb_size") {
new_options->memtable_prefix_bloom_huge_page_tlb_size =
ParseInt64(value);
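
ParseUint32 above exists because std::stoul returns unsigned long, which is
64 bits wide on LP64 platforms, so assigning its result straight to a
uint32_t field trips the warning. A range-checked variant (a sketch; the
diff narrows without the explicit check):

#include <cstdint>
#include <limits>
#include <stdexcept>
#include <string>

static uint32_t ParseUint32Checked(const std::string& value) {
  const unsigned long v = std::stoul(value);
  if (v > std::numeric_limits<uint32_t>::max()) {
    throw std::out_of_range(value + " does not fit in uint32_t");
  }
  return static_cast<uint32_t>(v);
}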

@ -47,7 +47,8 @@ GenericRateLimiter::GenericRateLimiter(
GenericRateLimiter::~GenericRateLimiter() {
MutexLock g(&request_mutex_);
stop_ = true;
requests_to_wait_ = queue_[Env::IO_LOW].size() + queue_[Env::IO_HIGH].size();
requests_to_wait_ = static_cast<int32_t>(queue_[Env::IO_LOW].size() +
queue_[Env::IO_HIGH].size());
for (auto& r : queue_[Env::IO_HIGH]) {
r->cv.Signal();
}

@ -30,12 +30,12 @@ TEST(RateLimiterTest, StartStop) {
TEST(RateLimiterTest, Rate) {
auto* env = Env::Default();
struct Arg {
Arg(int64_t _target_rate, int _burst)
Arg(int32_t _target_rate, int _burst)
: limiter(new GenericRateLimiter(_target_rate, 100 * 1000, 10)),
request_size(_target_rate / 10),
burst(_burst) {}
std::unique_ptr<RateLimiter> limiter;
int64_t request_size;
int32_t request_size;
int burst;
};
@ -51,13 +51,12 @@ TEST(RateLimiterTest, Rate) {
arg->limiter->Request(r.Uniform(arg->request_size - 1) + 1,
Env::IO_HIGH);
}
arg->limiter->Request(r.Uniform(arg->request_size - 1) + 1,
Env::IO_LOW);
arg->limiter->Request(r.Uniform(arg->request_size - 1) + 1, Env::IO_LOW);
}
};
for (int i = 1; i <= 16; i*=2) {
int64_t target = i * 1024 * 10;
int32_t target = i * 1024 * 10;
Arg arg(target, i / 4 + 1);
auto start = env->NowMicros();
for (int t = 0; t < i; ++t) {
@ -68,7 +67,7 @@ TEST(RateLimiterTest, Rate) {
auto elapsed = env->NowMicros() - start;
double rate = arg.limiter->GetTotalBytesThrough()
* 1000000.0 / elapsed;
fprintf(stderr, "request size [1 - %" PRIi64 "], limit %" PRIi64
fprintf(stderr, "request size [1 - %" PRIi32 "], limit %" PRIi32
" KB/sec, actual rate: %lf KB/sec, elapsed %.2lf seconds\n",
arg.request_size - 1, target / 1024, rate / 1024,
elapsed / 1000000.0);

@ -23,8 +23,8 @@ const char* Status::CopyState(const char* state) {
Status::Status(Code _code, const Slice& msg, const Slice& msg2) : code_(_code) {
assert(code_ != kOk);
const uint32_t len1 = msg.size();
const uint32_t len2 = msg2.size();
const uint32_t len1 = static_cast<uint32_t>(msg.size());
const uint32_t len2 = static_cast<uint32_t>(msg2.size());
const uint32_t size = len1 + (len2 ? (2 + len2) : 0);
char* result = new char[size + 4];
memcpy(result, &size, sizeof(size));

@ -54,7 +54,8 @@ class BackupRateLimiter {
(bytes_since_start_ * kMicrosInSecond) / max_bytes_per_second_;
if (should_take_micros > interval) {
env_->SleepForMicroseconds(should_take_micros - interval);
env_->SleepForMicroseconds(
static_cast<int>(should_take_micros - interval));
now = env_->NowMicros();
}
// reset interval
@ -165,9 +166,7 @@ class BackupEngineImpl : public BackupEngine {
uint64_t GetSize() const {
return size_;
}
uint32_t GetNumberFiles() {
return files_.size();
}
uint32_t GetNumberFiles() { return static_cast<uint32_t>(files_.size()); }
void SetSequenceNumber(uint64_t sequence_number) {
sequence_number_ = sequence_number;
}
