diff --git a/db/db_test.cc b/db/db_test.cc
index 051d99f2d..1d0852845 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -2576,9 +2576,7 @@ static const int kNumKeys = 1000;
 
 struct MTState {
   DBTest* test;
-  std::atomic<bool> stop;
   std::atomic<int> counter[kNumThreads];
-  std::atomic<bool> thread_done[kNumThreads];
 };
 
 struct MTThread {
@@ -2592,10 +2590,13 @@ static void MTThreadBody(void* arg) {
   int id = t->id;
   DB* db = t->state->test->db_;
   int counter = 0;
+  std::shared_ptr<SystemClock> clock = SystemClock::Default();
+  auto end_micros = clock->NowMicros() + kTestSeconds * 1000000U;
+
   fprintf(stderr, "... starting thread %d\n", id);
   Random rnd(1000 + id);
   char valbuf[1500];
-  while (t->state->stop.load(std::memory_order_acquire) == false) {
+  while (clock->NowMicros() < end_micros) {
     t->state->counter[id].store(counter, std::memory_order_release);
 
     int key = rnd.Uniform(kNumKeys);
@@ -2692,7 +2693,6 @@ static void MTThreadBody(void* arg) {
     }
     counter++;
   }
-  t->state->thread_done[id].store(true, std::memory_order_release);
   fprintf(stderr, "... stopping thread %d after %d ops\n", id, int(counter));
 }
 
@@ -2731,10 +2731,8 @@ TEST_P(MultiThreadedDBTest, MultiThreaded) {
   // Initialize state
   MTState mt;
   mt.test = this;
-  mt.stop.store(false, std::memory_order_release);
   for (int id = 0; id < kNumThreads; id++) {
     mt.counter[id].store(0, std::memory_order_release);
-    mt.thread_done[id].store(false, std::memory_order_release);
   }
 
   // Start threads
@@ -2746,16 +2744,7 @@ TEST_P(MultiThreadedDBTest, MultiThreaded) {
     env_->StartThread(MTThreadBody, &thread[id]);
   }
 
-  // Let them run for a while
-  env_->SleepForMicroseconds(kTestSeconds * 1000000);
-
-  // Stop the threads and wait for them to finish
-  mt.stop.store(true, std::memory_order_release);
-  for (int id = 0; id < kNumThreads; id++) {
-    while (mt.thread_done[id].load(std::memory_order_acquire) == false) {
-      env_->SleepForMicroseconds(100000);
-    }
-  }
+  env_->WaitForJoin();
 }
 
 INSTANTIATE_TEST_CASE_P(
@@ -4942,6 +4931,7 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel) {
   ASSERT_OK(dbfull()->TEST_WaitForCompact());
 
   ASSERT_EQ(NumTableFilesAtLevel(1), 0);
   ASSERT_EQ(NumTableFilesAtLevel(2), 0);
+  ASSERT_LT(SizeAtLevel(0) + SizeAtLevel(3) + SizeAtLevel(4), 120U * 4000U + 50U * 24);
 
   // Make sure data in files in L3 is not compacted by removing all files
@@ -7010,8 +7000,9 @@ TEST_F(DBTest, MemoryUsageWithMaxWriteBufferSizeToMaintain) {
     if ((size_all_mem_table > cur_active_mem) &&
         (cur_active_mem >=
          static_cast<uint64_t>(options.max_write_buffer_size_to_maintain)) &&
-        (size_all_mem_table > options.max_write_buffer_size_to_maintain +
-                              options.write_buffer_size)) {
+        (size_all_mem_table >
+         static_cast<uint64_t>(options.max_write_buffer_size_to_maintain) +
+             options.write_buffer_size)) {
       ASSERT_FALSE(memory_limit_exceeded);
       memory_limit_exceeded = true;
     } else {
diff --git a/env/fs_posix.cc b/env/fs_posix.cc
index a6b6bbd6b..e63381066 100644
--- a/env/fs_posix.cc
+++ b/env/fs_posix.cc
@@ -238,7 +238,7 @@ class PosixFileSystem : public FileSystem {
     }
     SetFD_CLOEXEC(fd, &options);
 
-    if (options.use_mmap_reads && sizeof(void*) >= 8) {
+    if (options.use_mmap_reads) {
      // Use of mmap for random reads has been removed because it
      // kills performance when storage is fast.
      // Use mmap when virtual address-space is plentiful.
diff --git a/include/rocksdb/table.h b/include/rocksdb/table.h
index 3590b7f42..1e48eca72 100644
--- a/include/rocksdb/table.h
+++ b/include/rocksdb/table.h
@@ -259,7 +259,7 @@ struct BlockBasedTableOptions {
   // block size specified here corresponds to uncompressed data.  The
  // actual size of the unit read from disk may be smaller if
   // compression is enabled. This parameter can be changed dynamically.
-  size_t block_size = 4 * 1024;
+  uint64_t block_size = 4 * 1024;
 
   // This is used to close a block before it reaches the configured
   // 'block_size'. If the percentage of free space in the current block is less
diff --git a/table/block_based/block_based_table_builder.cc b/table/block_based/block_based_table_builder.cc
index fb25a7ff9..7e7599aaa 100644
--- a/table/block_based/block_based_table_builder.cc
+++ b/table/block_based/block_based_table_builder.cc
@@ -406,7 +406,8 @@ struct BlockBasedTableBuilder::Rep {
         file(f),
         offset(0),
         alignment(table_options.block_align
-                      ? std::min(table_options.block_size, kDefaultPageSize)
+                      ? std::min(static_cast<size_t>(table_options.block_size),
+                                 kDefaultPageSize)
                       : 0),
         data_block(table_options.block_restart_interval,
                    table_options.use_delta_encoding,
diff --git a/table/block_based/block_based_table_factory.cc b/table/block_based/block_based_table_factory.cc
index 08cd06b6c..5b1bd2e68 100644
--- a/table/block_based/block_based_table_factory.cc
+++ b/table/block_based/block_based_table_factory.cc
@@ -778,7 +778,7 @@ std::string BlockBasedTableFactory::GetPrintableOptions() const {
     ret.append(buffer);
     ret.append(table_options_.persistent_cache->GetPrintableOptions());
   }
-  snprintf(buffer, kBufferSize, "  block_size: %" ROCKSDB_PRIszt "\n",
+  snprintf(buffer, kBufferSize, "  block_size: %" PRIu64 "\n",
            table_options_.block_size);
   ret.append(buffer);
   snprintf(buffer, kBufferSize, "  block_size_deviation: %d\n",