diff --git a/include/rocksdb/advanced_options.h b/include/rocksdb/advanced_options.h index 9f71cce63..fe331482e 100644 --- a/include/rocksdb/advanced_options.h +++ b/include/rocksdb/advanced_options.h @@ -502,19 +502,25 @@ struct AdvancedColumnFamilyOptions { // threshold. But it's not guaranteed. // Value 0 will be sanitized. // - // Default: result.target_file_size_base * 25 + // Default: target_file_size_base * 25 + // + // Dynamically changeable through SetOptions() API uint64_t max_compaction_bytes = 0; // All writes will be slowed down to at least delayed_write_rate if estimated // bytes needed to be compaction exceed this threshold. // // Default: 64GB + // + // Dynamically changeable through SetOptions() API uint64_t soft_pending_compaction_bytes_limit = 64 * 1073741824ull; // All writes are stopped if estimated bytes needed to be compaction exceed // this threshold. // // Default: 256GB + // + // Dynamically changeable through SetOptions() API uint64_t hard_pending_compaction_bytes_limit = 256 * 1073741824ull; // The compaction style. 
Default: kCompactionStyleLevel @@ -526,13 +532,17 @@ struct AdvancedColumnFamilyOptions { CompactionPri compaction_pri = kByCompensatedSize; // The options needed to support Universal Style compactions + // + // Dynamically changeable through SetOptions() API + // Dynamic change example: + // SetOptions("compaction_options_universal", "{size_ratio=2;}") CompactionOptionsUniversal compaction_options_universal; // The options for FIFO compaction style // // Dynamically changeable through SetOptions() API // Dynamic change example: - // SetOption("compaction_options_fifo", "{max_table_files_size=100;ttl=2;}") + // SetOptions("compaction_options_fifo", "{max_table_files_size=100;ttl=2;}") CompactionOptionsFIFO compaction_options_fifo; // An iteration->Next() sequentially skips over keys with the same @@ -602,7 +612,10 @@ struct AdvancedColumnFamilyOptions { bool optimize_filters_for_hits = false; // After writing every SST file, reopen it and read all the keys. + // // Default: false + // + // Dynamically changeable through SetOptions() API bool paranoid_file_checks = false; // In debug mode, RocksDB run consistency checks on the LSM every time the LSM @@ -612,7 +625,10 @@ struct AdvancedColumnFamilyOptions { bool force_consistency_checks = false; // Measure IO stats in compactions and flushes, if true. + // // Default: false + // + // Dynamically changeable through SetOptions() API bool report_bg_io_stats = false; // Non-bottom-level files older than TTL will go through the compaction diff --git a/include/rocksdb/options.h b/include/rocksdb/options.h index 63a65fa16..6d2fc5267 100644 --- a/include/rocksdb/options.h +++ b/include/rocksdb/options.h @@ -188,8 +188,7 @@ struct ColumnFamilyOptions : public AdvancedColumnFamilyOptions { // Dynamically changeable through SetOptions() API size_t write_buffer_size = 64 << 20; - // Compress blocks using the specified compression algorithm. This - // parameter can be changed dynamically. 
+ // Compress blocks using the specified compression algorithm. // // Default: kSnappyCompression, if it's supported. If snappy is not linked // with the library, the default is kNoCompression. @@ -212,6 +211,8 @@ struct ColumnFamilyOptions : public AdvancedColumnFamilyOptions { // - kZlibCompression: Z_DEFAULT_COMPRESSION (currently -1) // - kLZ4HCCompression: 0 // - For all others, we do not specify a compression level + // + // Dynamically changeable through SetOptions() API CompressionType compression; // Compression algorithm that will be used for the bottommost level that @@ -416,7 +417,10 @@ struct DBOptions { // files opened are always kept open. You can estimate number of files based // on target_file_size_base and target_file_size_multiplier for level-based // compaction. For universal-style compaction, you can usually set it to -1. + // // Default: -1 + // + // Dynamically changeable through SetDBOptions() API. int max_open_files = -1; // If max_open_files is -1, DB will open all files on DB::Open(). You can @@ -431,7 +435,10 @@ struct DBOptions { // [sum of all write_buffer_size * max_write_buffer_number] * 4 // This option takes effect only when there are more than one column family as // otherwise the wal size is dictated by the write_buffer_size. + // // Default: 0 + // + // Dynamically changeable through SetDBOptions() API. uint64_t max_total_wal_size = 0; // If non-null, then we should collect metrics about database operations @@ -492,13 +499,23 @@ struct DBOptions { // value is 6 hours. The files that get out of scope by compaction // process will still get automatically delete on every compaction, // regardless of this setting + // + // Default: 6 hours + // + // Dynamically changeable through SetDBOptions() API. uint64_t delete_obsolete_files_period_micros = 6ULL * 60 * 60 * 1000000; // Maximum number of concurrent background jobs (compactions and flushes). + // + // Default: 2 + // + // Dynamically changeable through SetDBOptions() API. 
int max_background_jobs = 2; // NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the // value of max_background_jobs. This option is ignored. + // + // Dynamically changeable through SetDBOptions() API, but the new value is ignored. int base_background_compactions = -1; // NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the @@ -513,7 +530,10 @@ struct DBOptions { // If you're increasing this, also consider increasing number of threads in // LOW priority thread pool. For more information, see // Env::SetBackgroundThreads + // // Default: -1 + // + // Dynamically changeable through SetDBOptions() API. int max_background_compactions = -1; // This value represents the maximum number of threads that will @@ -642,7 +662,10 @@ struct DBOptions { bool skip_log_error_on_recovery = false; // if not zero, dump rocksdb.stats to LOG every stats_dump_period_sec + // // Default: 600 (10 min) + // + // Dynamically changeable through SetDBOptions() API. unsigned int stats_dump_period_sec = 600; // If set true, will hint the underlying file system that the file @@ -709,6 +732,8 @@ struct DBOptions { // true. // // Default: 0 + // + // Dynamically changeable through SetDBOptions() API. size_t compaction_readahead_size = 0; // This is a maximum buffer size that is used by WinMmapReadableFile in @@ -735,6 +760,8 @@ struct DBOptions { // write requests if the logical sector size is unusual // // Default: 1024 * 1024 (1 MB) + // + // Dynamically changeable through SetDBOptions() API. size_t writable_file_max_buffer_size = 1024 * 1024; @@ -757,17 +784,23 @@ struct DBOptions { // to smooth out write I/Os over time. Users shouldn't rely on it for // persistency guarantee. // Issue one request for every bytes_per_sync written. 0 turns it off. - // Default: 0 // // You may consider using rate_limiter to regulate write rate to device. // When rate limiter is enabled, it automatically enables bytes_per_sync // to 1MB.
// // This option applies to table files + // + // Default: 0, turned off + // + // Dynamically changeable through SetDBOptions() API. uint64_t bytes_per_sync = 0; // Same as bytes_per_sync, but applies to WAL files + // // Default: 0, turned off + // + // Dynamically changeable through SetDBOptions() API. uint64_t wal_bytes_per_sync = 0; // A vector of EventListeners which callback functions will be called @@ -794,6 +827,8 @@ struct DBOptions { // Unit: byte per second. // // Default: 0 + // + // Dynamically changeable through SetDBOptions() API. uint64_t delayed_write_rate = 0; // By default, a single write thread queue is maintained. The thread gets