Fix Mac compile

Branch: main
Igor Canadi committed 10 years ago
parent 092f97e219
commit a52cecb56c

2 changed files:
  db/db_test.cc (43 changed lines)
  utilities/backupable/backupable_db.cc (5 changed lines)

db/db_test.cc

@@ -6142,8 +6142,8 @@ std::vector<std::uint64_t> ListTableFiles(Env* env, const std::string& path) {
   return ListSpecificFiles(env, path, kTableFile);
 }
 
-std::uint64_t GetNumberOfSstFilesForColumnFamily(
-    DB* db, std::string column_family_name) {
+uint64_t GetNumberOfSstFilesForColumnFamily(DB* db,
+                                            std::string column_family_name) {
   std::vector<LiveFileMetaData> metadata;
   db->GetLiveFilesMetaData(&metadata);
   uint64_t result = 0;
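For context, GetNumberOfSstFilesForColumnFamily is a small test helper: it asks the database for its live file metadata and counts the entries belonging to one column family. The diff only shows the first lines of the body; the sketch below is a plausible completion against the public RocksDB API (DB::GetLiveFilesMetaData and LiveFileMetaData::column_family_name), not necessarily the exact code in db_test.cc.

#include <cstdint>
#include <string>
#include <vector>

#include "rocksdb/db.h"

// Sketch: count live SST files that belong to `column_family_name`.
// The loop body is an assumption; only the first three lines of the
// function appear in the diff above.
uint64_t GetNumberOfSstFilesForColumnFamily(rocksdb::DB* db,
                                            std::string column_family_name) {
  std::vector<rocksdb::LiveFileMetaData> metadata;
  db->GetLiveFilesMetaData(&metadata);
  uint64_t result = 0;
  for (const auto& file : metadata) {
    if (file.column_family_name == column_family_name) {
      ++result;
    }
  }
  return result;
}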
@@ -6200,9 +6200,10 @@ TEST(DBTest, RecoverCheckFileAmountWithSmallWriteBuffer) {
   dbfull()->TEST_WaitForFlushMemTable(handles_[2]);
   {
     auto tables = ListTableFiles(env_, dbname_);
-    ASSERT_EQ(tables.size(), 1);
+    ASSERT_EQ(tables.size(), static_cast<size_t>(1));
     // Make sure 'dobrynia' was flushed: check sst files amount
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"), 1);
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
+              static_cast<uint64_t>(1));
   }
   // New WAL file
   ASSERT_OK(Put(1, Key(1), DummyString(1)));
@@ -6216,13 +6217,17 @@ TEST(DBTest, RecoverCheckFileAmountWithSmallWriteBuffer) {
                              &options);
   {
     // No inserts => default is empty
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"), 0);
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
+              static_cast<uint64_t>(0));
     // First 4 keys goes to separate SSTs + 1 more SST for 2 smaller keys
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"), 5);
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
+              static_cast<uint64_t>(5));
     // 1 SST for big key + 1 SST for small one
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"), 2);
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
+              static_cast<uint64_t>(2));
     // 1 SST for all keys
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"), 1);
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
+              static_cast<uint64_t>(1));
   }
 }
@@ -6247,8 +6252,9 @@ TEST(DBTest, RecoverCheckFileAmount) {
   // 4 memtable are not flushed, 1 sst file
   {
     auto tables = ListTableFiles(env_, dbname_);
-    ASSERT_EQ(tables.size(), 1);
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"), 1);
+    ASSERT_EQ(tables.size(), static_cast<size_t>(1));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
+              static_cast<uint64_t>(1));
   }
   // Memtable for 'nikitich' has flushed, new WAL file has opened
   // 4 memtable still not flushed
@@ -6270,8 +6276,9 @@ TEST(DBTest, RecoverCheckFileAmount) {
 
   {
     auto tables = ListTableFiles(env_, dbname_);
-    ASSERT_EQ(tables.size(), 2);
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"), 2);
+    ASSERT_EQ(tables.size(), static_cast<size_t>(2));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
+              static_cast<uint64_t>(2));
   }
 
   ReopenWithColumnFamilies({"default", "pikachu", "dobrynia", "nikitich"},
@@ -6282,10 +6289,14 @@ TEST(DBTest, RecoverCheckFileAmount) {
     // first, second and third WALs went to the same SST.
     // So, there is 6 SSTs: three for 'nikitich', one for 'default', one for
     // 'dobrynia', one for 'pikachu'
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"), 1);
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"), 3);
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"), 1);
-    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"), 1);
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
+              static_cast<uint64_t>(1));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
+              static_cast<uint64_t>(3));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
+              static_cast<uint64_t>(1));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
+              static_cast<uint64_t>(1));
   }
 }
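These casts are the whole db_test.cc part of the fix: gtest's ASSERT_EQ compares its two arguments through a template helper, and comparing a size_t or uint64_t expression against a plain int literal presumably triggered Clang's signed/unsigned comparison warning on OS X, which becomes a hard error under -Werror. A minimal standalone sketch of the pattern, using a hypothetical test and made-up values rather than the RocksDB fixtures:

#include <cstdint>
#include <vector>

#include <gtest/gtest.h>

// Hypothetical test reproducing the pattern fixed above (link against
// gtest_main to run it).
TEST(MacCompileSketch, CompareUnsignedAgainstLiteral) {
  std::vector<int> files{42};
  uint64_t sst_count = 1;

  // With -Werror, Clang may reject these, because gtest ends up
  // comparing a size_t / uint64_t against a signed int literal:
  //   ASSERT_EQ(files.size(), 1);
  //   ASSERT_EQ(sst_count, 1);

  // Casting the literal so both sides have the same unsigned type
  // keeps the comparison warning-free on every platform:
  ASSERT_EQ(files.size(), static_cast<size_t>(1));
  ASSERT_EQ(sst_count, static_cast<uint64_t>(1));
}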

utilities/backupable/backupable_db.cc

@@ -589,8 +589,9 @@ Status BackupEngineImpl::CreateNewBackup(DB* db, bool flush_before_backup) {
   double backup_speed = new_backup.GetSize() / (1.048576 * backup_time);
   Log(options_.info_log, "Backup number of files: %u",
       new_backup.GetNumberFiles());
-  Log(options_.info_log, "Backup size: %lu bytes", new_backup.GetSize());
-  Log(options_.info_log, "Backup time: %lu microseconds", backup_time);
+  Log(options_.info_log, "Backup size: %" PRIu64 " bytes",
+      new_backup.GetSize());
+  Log(options_.info_log, "Backup time: %" PRIu64 " microseconds", backup_time);
   Log(options_.info_log, "Backup speed: %.3f MB/s", backup_speed);
   Log(options_.info_log, "Backup Statistics %s",
       backup_statistics_.ToString().c_str());
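The backupable_db.cc change swaps "%lu" for the PRIu64 macro from <inttypes.h>. The logged values are uint64_t, which is unsigned long on typical 64-bit Linux but unsigned long long on OS X, so "%lu" trips Clang's format-string checking there. A minimal sketch of the portable formatting, using plain printf and made-up values rather than RocksDB's Log():

#include <cinttypes>  // PRIu64
#include <cstdint>
#include <cstdio>

int main() {
  // Made-up values standing in for new_backup.GetSize() and backup_time.
  uint64_t backup_size = 4096;
  uint64_t backup_time = 1500000;

  // "%lu" assumes uint64_t is unsigned long; on OS X it is
  // unsigned long long, so Clang's -Wformat (an error under -Werror)
  // rejects:
  //   printf("Backup size: %lu bytes\n", backup_size);

  // PRIu64 expands to the correct conversion specifier on each platform:
  printf("Backup size: %" PRIu64 " bytes\n", backup_size);
  printf("Backup time: %" PRIu64 " microseconds\n", backup_time);
  return 0;
}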
