Update MULTIGET_IO_BATCH_SIZE for non-async MultiGet (#10622)

Summary:
This stat was only getting updated in the async (coroutine) version of MultiGet.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10622

Reviewed By: akankshamahajan15

Differential Revision: D39188790

Pulled By: anand1976

fbshipit-source-id: 7e231507f65fc94c8a006c38f79dfba182a2c24a
main
anand76 2 years ago committed by Facebook GitHub Bot
parent 3a75219e5d
commit 5fbcc8c54d
  1. 1
      HISTORY.md
  2. 9
      db/db_basic_test.cc
  3. 1
      file/random_access_file_reader.cc

@@ -6,6 +6,7 @@
* Fix periodic_task unable to re-register the same task type, which may cause `SetOptions()` fail to update periodical_task time like: `stats_dump_period_sec`, `stats_persist_period_sec`. * Fix periodic_task unable to re-register the same task type, which may cause `SetOptions()` fail to update periodical_task time like: `stats_dump_period_sec`, `stats_persist_period_sec`.
* Fixed a bug in the rocksdb.prefetched.bytes.discarded stat. It was counting the prefetch buffer size, rather than the actual number of bytes discarded from the buffer. * Fixed a bug in the rocksdb.prefetched.bytes.discarded stat. It was counting the prefetch buffer size, rather than the actual number of bytes discarded from the buffer.
* Fix bug where the directory containing CURRENT can be left unsynced after CURRENT is updated to point to the latest MANIFEST, which leads to risk of unsynced data loss of CURRENT. * Fix bug where the directory containing CURRENT can be left unsynced after CURRENT is updated to point to the latest MANIFEST, which leads to risk of unsynced data loss of CURRENT.
* Update rocksdb.multiget.io.batch.size stat in non-async MultiGet as well.
### Public API changes ### Public API changes
* Add `rocksdb_column_family_handle_get_id`, `rocksdb_column_family_handle_get_name` to get name, id of column family in C API * Add `rocksdb_column_family_handle_get_id`, `rocksdb_column_family_handle_get_name` to get name, id of column family in C API

@@ -2261,7 +2261,9 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL0) {
ASSERT_EQ(multiget_io_batch_size.max, 3); ASSERT_EQ(multiget_io_batch_size.max, 3);
ASSERT_EQ(statistics()->getTickerCount(MULTIGET_COROUTINE_COUNT), 3); ASSERT_EQ(statistics()->getTickerCount(MULTIGET_COROUTINE_COUNT), 3);
} else { } else {
ASSERT_EQ(multiget_io_batch_size.count, 0); // Without Async IO, MultiGet will call MultiRead 3 times, once for each
// L0 file
ASSERT_EQ(multiget_io_batch_size.count, 3);
} }
} }
@@ -2378,8 +2380,9 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL1AndL2) {
statistics()->histogramData(MULTIGET_IO_BATCH_SIZE, &multiget_io_batch_size); statistics()->histogramData(MULTIGET_IO_BATCH_SIZE, &multiget_io_batch_size);
// There are 2 keys in L1 in two separate files, and 1 in L2. With // There are 2 keys in L1 in two separate files, and 1 in L2. With
// async IO, all three lookups will happen in parallel // optimize_multiget_for_io, all three lookups will happen in parallel.
ASSERT_EQ(multiget_io_batch_size.count, 1); // Otherwise, the L2 lookup will happen after L1.
ASSERT_EQ(multiget_io_batch_size.count, GetParam() ? 1 : 2);
ASSERT_EQ(multiget_io_batch_size.max, GetParam() ? 3 : 2); ASSERT_EQ(multiget_io_batch_size.max, GetParam() ? 3 : 2);
} }

@@ -381,6 +381,7 @@ IOStatus RandomAccessFileReader::MultiRead(
} }
} }
io_s = file_->MultiRead(fs_reqs, num_fs_reqs, opts, nullptr); io_s = file_->MultiRead(fs_reqs, num_fs_reqs, opts, nullptr);
RecordInHistogram(stats_, MULTIGET_IO_BATCH_SIZE, num_fs_reqs);
} }
#ifndef ROCKSDB_LITE #ifndef ROCKSDB_LITE

Loading…
Cancel
Save