Enable db_test running in Centos 32 bit OS and Alpine 32 bit OS (#9294)

Summary:
Closes https://github.com/facebook/rocksdb/issues/9271

Pull Request resolved: https://github.com/facebook/rocksdb/pull/9294

Reviewed By: riversand963, hx235

Differential Revision: D33586002

Pulled By: pdillinger

fbshipit-source-id: 3d1a2fa71023e108613ff03dbd37a5f954fc4920
Author: Si Ke (committed by Facebook GitHub Bot)
parent 5602b1d3d9
commit 93b1de4f45
Files changed (changed lines per file):
  db/db_test.cc (25)
  env/fs_posix.cc (2)
  include/rocksdb/table.h (2)
  table/block_based/block_based_table_builder.cc (3)
  table/block_based/block_based_table_factory.cc (2)

db/db_test.cc

@@ -2576,9 +2576,7 @@ static const int kNumKeys = 1000;
 struct MTState {
   DBTest* test;
-  std::atomic<bool> stop;
   std::atomic<int> counter[kNumThreads];
-  std::atomic<bool> thread_done[kNumThreads];
 };

 struct MTThread {
@@ -2592,10 +2590,13 @@ static void MTThreadBody(void* arg) {
   int id = t->id;
   DB* db = t->state->test->db_;
   int counter = 0;
+  std::shared_ptr<SystemClock> clock = SystemClock::Default();
+  auto end_micros = clock->NowMicros() + kTestSeconds * 1000000U;
   fprintf(stderr, "... starting thread %d\n", id);
   Random rnd(1000 + id);
   char valbuf[1500];
-  while (t->state->stop.load(std::memory_order_acquire) == false) {
+  while (clock->NowMicros() < end_micros) {
     t->state->counter[id].store(counter, std::memory_order_release);
     int key = rnd.Uniform(kNumKeys);
@@ -2692,7 +2693,6 @@ static void MTThreadBody(void* arg) {
     }
     counter++;
   }
-  t->state->thread_done[id].store(true, std::memory_order_release);
   fprintf(stderr, "... stopping thread %d after %d ops\n", id, int(counter));
 }
@@ -2731,10 +2731,8 @@ TEST_P(MultiThreadedDBTest, MultiThreaded) {
   // Initialize state
   MTState mt;
   mt.test = this;
-  mt.stop.store(false, std::memory_order_release);
   for (int id = 0; id < kNumThreads; id++) {
     mt.counter[id].store(0, std::memory_order_release);
-    mt.thread_done[id].store(false, std::memory_order_release);
   }

   // Start threads
@@ -2746,16 +2744,7 @@ TEST_P(MultiThreadedDBTest, MultiThreaded) {
     env_->StartThread(MTThreadBody, &thread[id]);
   }

-  // Let them run for a while
-  env_->SleepForMicroseconds(kTestSeconds * 1000000);
-
-  // Stop the threads and wait for them to finish
-  mt.stop.store(true, std::memory_order_release);
-  for (int id = 0; id < kNumThreads; id++) {
-    while (mt.thread_done[id].load(std::memory_order_acquire) == false) {
-      env_->SleepForMicroseconds(100000);
-    }
-  }
+  env_->WaitForJoin();
 }

 INSTANTIATE_TEST_CASE_P(
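The MultiThreaded change above replaces the shared stop flag and per-thread done flags with a wall-clock deadline inside each worker plus a single env_->WaitForJoin() in the test body. Below is a minimal standalone sketch of the same pattern using std::thread and std::chrono in place of RocksDB's Env and SystemClock; the names and the 2-second duration are illustrative only, not taken from the patch.

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>
#include <vector>

// Each worker runs until its own deadline expires; the launcher only joins.
// No shared stop flag and no polling of per-thread "done" flags is needed.
static void WorkerBody(int id, std::chrono::seconds duration,
                       std::atomic<int>* counter) {
  const auto end = std::chrono::steady_clock::now() + duration;
  while (std::chrono::steady_clock::now() < end) {
    counter->fetch_add(1, std::memory_order_relaxed);  // stand-in for real work
  }
  std::fprintf(stderr, "... stopping thread %d\n", id);
}

int main() {
  constexpr int kNumThreads = 4;
  std::atomic<int> counters[kNumThreads];
  std::vector<std::thread> threads;
  for (int id = 0; id < kNumThreads; id++) {
    counters[id].store(0);
    threads.emplace_back(WorkerBody, id, std::chrono::seconds(2), &counters[id]);
  }
  for (auto& t : threads) {
    t.join();  // plays the role of env_->WaitForJoin()
  }
  return 0;
}

The benefit on slow or heavily loaded targets (such as emulated 32-bit CI hosts) is that each thread measures its own runtime, so the test no longer depends on the main thread's sleep lining up with the workers' progress.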
@@ -4942,6 +4931,7 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel) {
   ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ(NumTableFilesAtLevel(1), 0);
   ASSERT_EQ(NumTableFilesAtLevel(2), 0);
   ASSERT_LT(SizeAtLevel(0) + SizeAtLevel(3) + SizeAtLevel(4),
             120U * 4000U + 50U * 24);
   // Make sure data in files in L3 is not compacted by removing all files
@@ -7010,7 +7000,8 @@ TEST_F(DBTest, MemoryUsageWithMaxWriteBufferSizeToMaintain) {
     if ((size_all_mem_table > cur_active_mem) &&
         (cur_active_mem >=
          static_cast<uint64_t>(options.max_write_buffer_size_to_maintain)) &&
-        (size_all_mem_table > options.max_write_buffer_size_to_maintain +
+        (size_all_mem_table >
+             static_cast<uint64_t>(options.max_write_buffer_size_to_maintain) +
                                   options.write_buffer_size)) {
       ASSERT_FALSE(memory_limit_exceeded);
       memory_limit_exceeded = true;
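The new static_cast in the memtable-size check is about integer conversion rules on 32-bit builds rather than a change to the condition: size_all_mem_table is a uint64_t in the test, max_write_buffer_size_to_maintain is an int64_t option, and write_buffer_size is a size_t, which is 32 bits wide there. Without the cast the right-hand sum is int64_t on 32-bit targets, so the comparison mixes signed and unsigned operands; casting the option up front keeps the whole expression in unsigned 64-bit arithmetic, matching what a 64-bit build already does. A small sketch of the type behavior, standalone and with stand-in variable names:

#include <cstdint>
#include <type_traits>

int main() {
  std::uint64_t size_all_mem_table = 0;  // value of a uint64_t DB property
  std::int64_t max_to_maintain = 0;      // int64_t option
  std::uint32_t write_buffer_size = 0;   // what size_t looks like on 32-bit

  // Without a cast the sum is int64_t on a 32-bit build, so comparing it
  // against the uint64_t on the left is a signed/unsigned comparison, which
  // compilers flag under -Wsign-compare.
  static_assert(std::is_same<decltype(max_to_maintain + write_buffer_size),
                             std::int64_t>::value,
                "sum is signed when size_t is 32 bits");

  // Casting the option keeps everything unsigned 64-bit, the same type the
  // expression already has on a 64-bit build where size_t is 64 bits.
  static_assert(
      std::is_same<decltype(static_cast<std::uint64_t>(max_to_maintain) +
                            write_buffer_size),
                   std::uint64_t>::value,
      "sum is unsigned after the cast");

  bool exceeded =
      size_all_mem_table >
      static_cast<std::uint64_t>(max_to_maintain) + write_buffer_size;
  return exceeded ? 0 : 0;
}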

env/fs_posix.cc

@@ -238,7 +238,7 @@ class PosixFileSystem : public FileSystem {
     }
     SetFD_CLOEXEC(fd, &options);
-    if (options.use_mmap_reads && sizeof(void*) >= 8) {
+    if (options.use_mmap_reads) {
       // Use of mmap for random reads has been removed because it
       // kills performance when storage is fast.
       // Use mmap when virtual address-space is plentiful.
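Dropping the pointer-width gate means use_mmap_reads is honored on 32-bit builds instead of being silently ignored, which keeps db_test's mmap-read configurations meaningful there. Callers that still want to avoid mapping large files into a small address space can apply the same sizeof(void*) test where their options are built; a hypothetical sketch follows (the struct and helper are illustrative, not RocksDB API):

#include <cstddef>

// Hypothetical helper: decide at compile time whether the process has a
// large (64-bit) virtual address space before opting into mmap reads.
constexpr bool HasPlentifulAddressSpace() { return sizeof(void*) >= 8; }

struct FileReadOptions {  // stand-in for a real options struct
  bool use_mmap_reads = false;
};

// Gate the option once, where options are assembled, rather than silently
// overriding it deep inside the file system implementation.
FileReadOptions MakeReadOptions(bool want_mmap) {
  FileReadOptions opts;
  opts.use_mmap_reads = want_mmap && HasPlentifulAddressSpace();
  return opts;
}

int main() {
  FileReadOptions opts = MakeReadOptions(/*want_mmap=*/true);
  return opts.use_mmap_reads ? 0 : 0;
}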

include/rocksdb/table.h

@@ -259,7 +259,7 @@ struct BlockBasedTableOptions {
   // block size specified here corresponds to uncompressed data. The
   // actual size of the unit read from disk may be smaller if
   // compression is enabled. This parameter can be changed dynamically.
-  size_t block_size = 4 * 1024;
+  uint64_t block_size = 4 * 1024;

   // This is used to close a block before it reaches the configured
   // 'block_size'. If the percentage of free space in the current block is less

table/block_based/block_based_table_builder.cc

@@ -406,7 +406,8 @@ struct BlockBasedTableBuilder::Rep {
         file(f),
         offset(0),
         alignment(table_options.block_align
-                       ? std::min(table_options.block_size, kDefaultPageSize)
+                       ? std::min(static_cast<size_t>(table_options.block_size),
+                                  kDefaultPageSize)
                        : 0),
         data_block(table_options.block_restart_interval,
                    table_options.use_delta_encoding,
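The extra cast in the alignment initializer is needed because std::min deduces one template parameter from both arguments: with block_size now a uint64_t and kDefaultPageSize a size_t, the two types differ on 32-bit builds (and on some 64-bit ABIs), so the call would not compile without giving both operands the same type. A minimal illustration of the rule, with stand-in names:

#include <algorithm>
#include <cstddef>
#include <cstdint>

int main() {
  std::uint64_t block_size = 64 * 1024;    // option stored as a fixed 64-bit type
  const std::size_t kPageSize = 4 * 1024;  // size_t: 32 bits on a 32-bit build

  // std::min(block_size, kPageSize);
  //   ^ fails to compile wherever uint64_t and size_t are distinct types,
  //     because std::min<T> takes both arguments as the same T.

  // Casting one operand gives deduction a single type; the result is capped
  // by kPageSize, so it always fits in size_t.
  std::size_t alignment =
      std::min(static_cast<std::size_t>(block_size), kPageSize);
  return alignment == kPageSize ? 0 : 1;
}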

table/block_based/block_based_table_factory.cc

@@ -778,7 +778,7 @@ std::string BlockBasedTableFactory::GetPrintableOptions() const {
     ret.append(buffer);
     ret.append(table_options_.persistent_cache->GetPrintableOptions());
   }
-  snprintf(buffer, kBufferSize, "  block_size: %" ROCKSDB_PRIszt "\n",
+  snprintf(buffer, kBufferSize, "  block_size: %" PRIu64 "\n",
            table_options_.block_size);
   ret.append(buffer);
   snprintf(buffer, kBufferSize, "  block_size_deviation: %d\n",
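The format-string change follows from the type change in table.h: ROCKSDB_PRIszt is RocksDB's printf specifier for size_t, while block_size is now a uint64_t, so the portable choice is PRIu64 from <cinttypes>, which expands to the right length modifier on both 32-bit and 64-bit builds. A small standalone example of the same idiom:

#include <cinttypes>
#include <cstdio>

int main() {
  std::uint64_t block_size = 4 * 1024;
  char buffer[64];

  // PRIu64 expands to the correct conversion for uint64_t (e.g. "llu" on a
  // 32-bit build), whereas a size_t specifier would be wrong for this type.
  std::snprintf(buffer, sizeof(buffer), "  block_size: %" PRIu64 "\n",
                block_size);
  std::fputs(buffer, stdout);
  return 0;
}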
