diff --git a/librocksdb-sys/rocksdb/.circleci/config.yml b/.circleci/config.yml
similarity index 100%
rename from librocksdb-sys/rocksdb/.circleci/config.yml
rename to .circleci/config.yml
diff --git a/librocksdb-sys/rocksdb/.circleci/ubsan_suppression_list.txt b/.circleci/ubsan_suppression_list.txt
similarity index 100%
rename from librocksdb-sys/rocksdb/.circleci/ubsan_suppression_list.txt
rename to .circleci/ubsan_suppression_list.txt
diff --git a/librocksdb-sys/rocksdb/.clang-format b/.clang-format
similarity index 100%
rename from librocksdb-sys/rocksdb/.clang-format
rename to .clang-format
diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
deleted file mode 100644
index 25ad816..0000000
--- a/.github/workflows/rust.yml
+++ /dev/null
@@ -1,109 +0,0 @@
-name: RocksDB CI
-
-on: [push, pull_request]
-env:
-  RUST_VERSION: 1.60.0
-
-jobs:
-  fmt:
-    name: Rustfmt
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v2
-      - name: Install rust
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: ${{ env.RUST_VERSION }}
-          components: rustfmt
-          profile: minimal
-          override: true
-      - name: Run rustfmt
-        uses: actions-rs/cargo@v1
-        with:
-          command: fmt
-          args: --all -- --check
-
-  doc-check:
-    name: Rustdoc-check
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v2
-      - name: Install rust
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: ${{ env.RUST_VERSION }}
-          components: rust-docs
-          profile: minimal
-          override: true
-      - name: Run cargo rustdoc
-        uses: actions-rs/cargo@v1
-        with:
-          command: rustdoc
-          args: -- -D warnings
-
-  clippy:
-    name: Clippy
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v2
-      - name: Install rust
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: ${{ env.RUST_VERSION }}
-          components: clippy
-          profile: minimal
-          override: true
-      - name: Run clippy
-        uses: actions-rs/clippy-check@v1
-        with:
-          token: ${{ secrets.GITHUB_TOKEN }}
-          args: --all-targets -- -D warnings
-
-  audit:
-    name: Security audit
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - uses: actions-rs/audit-check@v1
-        with:
-          token: ${{ secrets.GITHUB_TOKEN }}
-
-  test:
-    name: ${{ matrix.build }}
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        build: [Linux, macOS, Windows]
-        include:
-          - build: Linux
-            os: ubuntu-latest
-          - build: macOS
-            os: macos-latest
-          - build: Windows
-            os: windows-latest
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v2
-      - name: Install rust
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: ${{ env.RUST_VERSION }}
-          target: ${{ matrix.target }}
-          profile: minimal
-          override: true
-      - name: Remove msys64 # Workaround to resolve link error with C:\msys64\mingw64\bin\libclang.dll
-        if: runner.os == 'Windows'
-        run: Remove-Item -LiteralPath "C:\msys64\" -Force -Recurse
-      - name: Install dependencies
-        if: runner.os == 'Windows'
-        run: choco install llvm -y
-      - name: Run rocksdb tests
-        run: |
-          cargo test --all
-          cargo test --all --features multi-threaded-cf
-      - name: Run rocksdb tests (jemalloc)
-        if: runner.os != 'Windows'
-        run: cargo test --all --features jemalloc
diff --git a/librocksdb-sys/rocksdb/.github/workflows/sanity_check.yml b/.github/workflows/sanity_check.yml
similarity index 100%
rename from librocksdb-sys/rocksdb/.github/workflows/sanity_check.yml
rename to .github/workflows/sanity_check.yml
diff --git a/.gitignore b/.gitignore
index 50a0d03..8dd7e82 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,11 +1,99 @@
-*.swo -target -Cargo.lock -*.orig -*.bk -*rlib +make_config.mk +rocksdb.pc + +*.a +*.arc +*.d +*.dylib* +*.gcda +*.gcno +*.o +*.o.tmp +*.so +*.so.* +*_test +*_bench +*_stress +*.out +*.class +*.jar +*.*jnilib* +*.d-e +*.o-* +*.swp +*~ +*.vcxproj +*.vcxproj.filters +*.sln +*.cmake +.watchmanconfig +CMakeCache.txt +CMakeFiles/ +build/ + +ldb +manifest_dump +sst_dump +blob_dump +block_cache_trace_analyzer +tools/block_cache_analyzer/*.pyc +column_aware_encoding_exp +util/build_version.cc +build_tools/VALGRIND_LOGS/ +coverage/COVERAGE_REPORT +.gdbhistory +.gdb_history +package/ +unity.a tags -path +etags +rocksdb_dump +rocksdb_undump +db_test2 +trace_analyzer +block_cache_trace_analyzer +io_tracer_parser .DS_Store -.idea +.vs .vscode +.clangd + +java/out +java/target +java/test-libs +java/*.log +java/include/org_rocksdb_*.h + +.idea/ +*.iml + +rocksdb.cc +rocksdb.h +unity.cc +java/crossbuild/.vagrant +.vagrant/ +java/**/*.asc +java/javadoc + +scan_build_report/ +t +LOG + +db_logs/ +tp2/ +fbcode/ +fbcode +buckifier/*.pyc +buckifier/__pycache__ + +compile_commands.json +clang-format-diff.py +.py3/ + +fuzz/proto/gen/ +fuzz/crash-* + +cmake-build-* +third-party/folly/ +.cache +*.sublime-* diff --git a/librocksdb-sys/rocksdb/.lgtm.yml b/.lgtm.yml similarity index 100% rename from librocksdb-sys/rocksdb/.lgtm.yml rename to .lgtm.yml diff --git a/librocksdb-sys/rocksdb/.watchmanconfig b/.watchmanconfig similarity index 100% rename from librocksdb-sys/rocksdb/.watchmanconfig rename to .watchmanconfig diff --git a/librocksdb-sys/rocksdb/AUTHORS b/AUTHORS similarity index 100% rename from librocksdb-sys/rocksdb/AUTHORS rename to AUTHORS diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index c6c487d..0000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,416 +0,0 @@ -# Changelog - -## [Unreleased] - -## 0.21.0 (2023-05-09) - -* Add doc-check to CI with fix warnings in docs (YuraKotov) -* Fix rustdoc::broken-intra-doc-links errors (YuraKotov) -* Fix 32-bit ARM build (EyeOfPython) -* Allow specifying checksum type (romanz) -* Enable librocksdb-sys to be built by rustc_codegen_cranelift (ZePedroResende) -* Update to RocksDB 8.0.0 (niklasf) -* Block cache creation failure is not recoverable (niklasf) -* Update iOS min version to 12 in the build script (mighty840) -* Actually enable `io-uring` (niklasf) -* Update to RocksDB 8.1.1 (niklasf) -* Add `Cache::new_hyper_clock_cache()` (niklasf) -* Retrieve Value from KeyMayExist if value found in Cache or Memory (Congyuwang) -* Support for comparators as closures (pegesund) -* Fix bug in DBWALIterator that would miss updates (Zagitta) - -## 0.20.1 (2023-02-10) - -* Fix supporting MSRV 1.60.0 (aleksuss) - -## 0.20.0 (2023-02-09) - -* Support RocksDB 7.x `BackupEngineOptions` (exabytes18) -* Fix `int128` compatibility check (Dirreke) -* Add `Options::load_latest` method to load the latest options from RockDB (Congyuwang) -* Bump bindgen to 0.64.0 (cwlittle) -* Bump rocksdb to 7.9.2 (kwek20) -* Make `set_snapshot` method public (a14e) -* Add `drop_cf` function to `TransactionDB` (bothra90) -* Bump rocksdb to 7.8.3 (aleksuss) -* Add doc for `set_cache_index_and_filter_blocks` (guerinoni) -* Re-run `build.rs` if env vars change (drahnr) -* Add `WriteBatch::data` method (w41ter) -* Add `DB::open_cf_with_opts` method (w41ter) -* Use lz4-sys crate rather then submodule (niklasf) -* Make create_new_backup_flush generic (minshao) - -## 0.19.0 (2022-08-05) - -* Add support for building with `io_uring` on Linux (parazyd) -* Change iterators to return Result (mina86) 
-* Support RocksDB transaction (yiyuanliu) -* Avoid pulling in dependencies via static feature flag (niklasf) -* Bump `rocksdb` to 7.4.4 (niklasf) -* Bump `tikv-jemalloc-sys` to 0.5 (niklasf) -* Update `set_use_fsync` comment (nazar-pc) -* Introduce ReadOptions::set_iterate_range and PrefixRange (mina86) -* Bump `rocksdb` to 7.4.3 (aleksuss) -* Don’t hold onto ReadOptions.inner when iterating (mina86) -* Bump `zstd-sys` from 1.6 to 2.0 (slightknack) -* Enable a building on the iOS platform (dignifiedquire) -* Add DBRawIteratorWithThreadMode::item method (mina86) -* Use NonNull in DBRawIteratorWithThreadMode (mina86) -* Tiny refactoring including fix for UB (niklasf) -* Add batched version MultiGet API (yhchiang-sol) -* Upgrade to rocksdb v7.3.1 (yhchiang-sol) -* Consistently use `ffi_util::to_cpath` to convert `Path` to `CString` (mina86) -* Convert properties to `&CStr` (mina86) -* Allow passing `&CStr` arguments (mina86) -* Fix memory leak when reading properties and avoid memory allocation (mina86) -* Fix Windows UTF-8 build flag (rajivshah3) -* Use more target features to build librocksdb-sys (niklasf) -* Fix `bz_internal_error` symbol multiply defined (nanpuyue) -* Bump rocksdb to 7.1.2 (dignifiedquire) -* Add BlobDB options (dignifiedquire) -* Add snapshot `PinnableSlice` based API (zheland) - -## 0.18.0 (2022-02-03) - -* Add open_cf_descriptor methods for Secondary and ReadOnly AccessType (steviez) -* Make Ribbon filters available (niklasf) -* Change versioning scheme of `librocksdb-sys` crate (aleksuss) -* Upgrade to RocksDB 6.28.2 (akrylysov) -* Fix theoretical UB while transmuting Arc (niklasf) -* Support configuring bottom-most compression level (mina86) -* Add BlockBasedOptions::set_whole_key_filtering (niklasf) -* Add constants for all supported properties (steviez) -* Make CacheWrapper and EnvWrapper Send and Sync (aleksuss) -* Replace mem::transmute with narrower conversions (niklasf) -* Optimize non-overlapping copy in raw_data (niklasf) -* Support multi_get_* methods (olegnn) -* Optimize multi_get_cf_opt() to use size hint (niklasf) -* Fix typo in set_background_purge_on_iterator_cleanup method (Congyuwang) -* Use external compression crates where possible (Dr-Emann) -* Update compression dependencies (akrylysov) -* Add method for opening DB with ro access and cf descriptors (nikurt) -* Support restoring from a specified backup (GoldenLeaves) -* Add merge operands iterator (0xdeafbeef) -* Derive serde::{Serialize, Deserialize} for configuration enums (thibault-martinez) -* Add feature flag for runtime type information and metadata (jgraettinger) -* Add set_info_log_level to control log verbosity (tkintscher) -* Replace jemalloc-sys for tikv-jemalloc-sys (Rexagon) -* Support UTF-8 file paths on Windows (rajivshah3) -* Support building RocksDB with jemalloc (akrylysov) -* Add rocksdb WAL flush api (duarten) -* Update rocksdb to v6.22.1 (duarten) - -## 0.17.0 (2021-07-22) - -* Fix `multi_get` method (mikhailOK) -* Bump `librocksdb-sys` up to 6.19.3 (olegnn) -* Add support for the cuckoo table format (rbost) -* RocksDB is not compiled with SSE4 instructions anymore unless the corresponding features are enabled in rustc (mbargull) -* Bump `librocksdb-sys` up to 6.20.3 (olegnn, akrylysov) -* Add `DB::key_may_exist_cf_opt` method (stanislav-tkach) -* Add `Options::set_zstd_max_train_bytes` method (stanislav-tkach) -* Mark Cache and Env as Send and Sync (akrylysov) -* Allow cloning the Cache and Env (duarten) -* Make SSE inclusion conditional for target features (mbargull) -* Use 
Self where possible (adamnemecek) -* Don't leak dropped column families (ryoqun) - -## 0.16.0 (2021-04-18) - -* Add `DB::cancel_all_background_work` method (stanislav-tkach) -* Bump `librocksdb-sys` up to 6.13.3 (aleksuss) -* Add `multi_get`, `multi_get_opt`, `multi_get_cf` and `multi_get_cf_opt` `DB` methods (stanislav-tkach) -* Allow setting options on a ColumnFamily (romanz) -* Fix logic related to merge operator settings (BoOTheFurious) -* Export persist_period_sec option and background_threads (developerfred) -* Remove unneeded bindgen features (Kixunil) -* Add merge delete_callback omitted by mistake (zhangsoledad) -* Bump `librocksdb-sys` up to 6.17.3 (ordian) -* Remove the need for `&mut self` in `create_cf` and `drop_cf` (v2) (ryoqun) -* Keep Cache and Env alive with Rc (acrrd) -* Add `DB::open_cf_with_ttl` method (fdeantoni) - -## 0.15.0 (2020-08-25) - -* Fix building rocksdb library on windows host (aleksuss) -* Add github actions CI for windows build (aleksuss) -* Update doc for `Options::set_compression_type` (wqfish) -* Add clippy linter in CI (aleksuss) -* Use DBPath for backup_restore test (wqfish) -* Allow to build RocksDB with a different stdlib (calavera) -* Add some doc-comments and tiny refactoring (aleksuss) -* Expose `open_with_ttl`. (calavera) -* Fixed build for `x86_64-linux-android` that doesn't support PCLMUL (vimmerru) -* Add support for `SstFileWriter` and `DB::ingest_external_file` (methyl) -* Add set_max_log_file_size and set_recycle_log_file_num to the Options (stanislav-tkach) -* Export the `DEFAULT_COLUMN_FAMILY_NAME` constant (stanislav-tkach) -* Fix slice transformers with no in_domain callback (nelhage) -* Don't segfault on failed a merge operator (nelhage) -* Adding read/write/db/compaction options (linxGnu) -* Add dbpath and env options (linxGnu) -* Add compaction filter factory API (unrealhoang) -* Add link stdlib when linking prebuilt rocksdb (unrealhoang) -* Support fetching sst files metadata, delete files in range, get mem usage (linxGnu) -* Do not set rerun-if-changed=build.rs (xu-cheng) -* Use pretty_assertions in tests (stanislav-tkach) -* librocksdb-sys: update rocksdb to 6.11.4 (ordian) -* Adding backup engine info (linxGnu) -* Implement `Clone` trait for `Options` (stanislav-tkach) -* Added `Send` implementation to `WriteBatch` (stanislav-tkach) -* Extend github actions (stanislav-tkach) -* Avoid copy for merge operator result using delete_callback (xuchen-plus) - -## 0.14.0 (2020-04-22) - -* Updated lz4 to v1.9.2 (ordian) -* BlockBasedOptions: expose `format_version`, `[index_]block_restart_interval` (ordian) -* Improve `ffi_try` macro to make trailing comma optional (wqfish) -* Add `set_ratelimiter` to the `Options` (PatrickNicholas) -* Add `set_max_total_wal_size` to the `Options` (wqfish) -* Simplify conversion on iterator item (zhangsoledad) -* Add `flush_cf` method to the `DB` (wqfish) -* Fix potential segfault when calling `next` on the `DBIterator` that is at the end of the range (wqfish) -* Move to Rust 2018 (wqfish) -* Fix doc for `WriteBatch::delete` (wqfish) -* Bump `uuid` and `bindgen` dependencies (jonhoo) -* Change APIs that never return error to not return `Result` (wqfish) -* Fix lifetime parameter for iterators (wqfish) -* Add a doc for `optimize_level_style_compaction` method (NikVolf) -* Make `DBPath` use `tempfile` (jder) -* Refactor `db.rs` and `lib.rs` into smaller pieces (jder) -* Check if we're on a big endian system and act upon it (knarz) -* Bump internal snappy version up to 1.1.8 (aleksuss) -* Bump rocksdb 
version up to 6.7.3 (aleksuss) -* Atomic flush option (mappum) -* Make `set_iterate_upper_bound` method safe (wqfish) -* Add support for data block hash index (dvdplm) -* Add some extra config options (casualjim) -* Add support for range delete APIs (wqfish) -* Improve building `librocksdb-sys` with system libraries (basvandijk) -* Add support for `open_for_read_only` APIs (wqfish) -* Fix doc for `DBRawIterator::prev` and `next` methods (wqfish) -* Add support for `open_as_secondary` APIs (calavera) - -## 0.13.0 (2019-11-12) - -### Changes - -* Added `ReadOptions::set_verify_checksums` and - `Options::set_level_compaction_dynamic_level_bytes` methods (ordian) -* Array of bytes has been changed for pinnable slice for get operations (nbdd0121) -* Implemented `Sync` for `DBRawIterator` (nbdd0121) -* Removed extra copy in DBRawIterator (nbdd0121) -* Added `Options::max_dict_bytes` and `Options::zstd_max_training_bytes` methods(methyl) -* Added Android support (rtsisyk) -* Added lifetimes for `DBIterator` return types (ngotchac) -* Bumped rocksdb up to 6.2.4 (aleksuss) -* Disabled trait derivation for librocksdb-sys (EyeOfPython) -* Added `DB::get_updates_since()` to iterate write batches in a given sequence (nlfiedler) -* Added `ReadOptions::set_tailing()` to create a tailing iterator that continues to - iterate over the database as new records are added (cjbradfield) -* Changed column families storing (aleksuss) -* Exposed the `status` method on iterators (rnarubin) - -## 0.12.3 (2019-07-19) - -### Changes - -* Enabled sse4.2/pclmul for accelerated crc32c (yjh0502) -* Added `set_db_write_buffer_size` to the Options API (rnarubin) -* Bumped RocksDB to 6.1.2 (lispy) -* Added `Sync` and `Send` implementations to `Snapshot` (pavel-mukhanov) -* Added `raw_iterator_cf_opt` to the DB API (rnarubin) -* Added `DB::latest_sequence_number` method (vitvakatu) - -## 0.12.2 (2019-05-03) - -### Changes - -* Updated `compact_range_cf` to use generic arguments (romanz) -* Removed allocations from `SliceTransform` implementation (ekmartin) -* Bumped RocksDB to 5.18.3 (baptistejamin) -* Implemented `delete_range` and `delete_range_cf` (baptistejamin) -* Added contribution guide (rhurkes) -* Cleaned up documentation for `ReadOptions.set_iterate_upper_bound` method (xiaobogaga) -* Added `flush` and `flush_opt` operations (valeriansaliou) - -## 0.12.1 (2019-03-27) - -### Changes - -* Added `iterator_cf_opt` function to `DB` (elichai) -* Added `set_allow_mmap_writes` and `set_allow_mmap_reads` functions to `Options` (aleksuss) - - -## 0.12.0 (2019-03-10) - -### Changes - -* Added support for PlainTable factories (ekmartin) -* Added ability to restore latest backup (rohitjoshi) -* Added support for pinnable slices (xxuejie) -* Added ability to get property values (ekmartin) -* Simplified opening database when using non-default column families (iSynaptic) -* `ColumnFamily`, `DBIterator` and `DBRawIterator` now have lifetime parameters to prevent using them after the `DB` has been dropped (iSynaptic) -* Creating `DBIterator` and `DBRawIterator` now accept `ReadOptions` (iSynaptic) -* All database operations that accepted byte slices, `&[u8]`, are now generic and accept anything that implements `AsRef<[u8]>` (iSynaptic) -* Bumped RocksDB to version 5.17.2 (aleksuss) -* Added `set_readahead_size` to `ReadOptions` (iSynaptic) -* Updated main example in doc tests (mohanson) -* Updated requirements documentation (jamesray1) -* Implemented `AsRef<[u8]>` for `DBVector` (iSynaptic) - - -## 0.11.0 (2019-01-10) - -### 
Announcements - -* This is the first release under the new [Maintainership](MAINTAINERSHIP.md) model. - Three contributors have been selected to help maintain this library -- Oleksandr Anyshchenko ([@aleksuss](https://github.com/aleksuss)), Jordan Terrell ([@iSynaptic](https://github.com/iSynaptic)), and Ilya Bogdanov ([@vitvakatu](https://github.com/vitvakatu)). Many thanks to Tyler Neely ([@spacejam](https://github.com/spacejam)) for your support while taking on this new role. - -* A [gitter.im chat room](https://gitter.im/rust-rocksdb/Lobby) has been created. Although it's not guaranteed to be "staffed", it may help to collaborate on changes to `rust-rocksdb`. - -### Changes - -* added LZ4, ZSTD, ZLIB, and BZIP2 compression support (iSynaptic) -* added support for `Checkpoint` (aleksuss) -* added support for `SliceTransform` (spacejam) -* added `DBPath` struct to ensure test databases are cleaned up (ekmartin, iSynaptic) -* fixed `rustfmt.toml` to work with newer `rustfmt` version (ekmartin, iSynaptic) -* bindgen bumped up to 0.43 (s-panferov) -* made `ColumnFamily` struct `Send` (Tpt) -* made `DBIterator` struct `Send` (Elzor) -* `create_cf` and `drop_cf` methods on `DB` now work with immutable references (aleksuss) -* fixed crash in `test_column_family` test on macOS (aleksuss) -* fixed/implemented CI builds for macOS and Windows (aleksuss, iSynaptic) -* exposed `set_skip_stats_update_on_db_open` option (romanz) -* exposed `keep_log_file_num` option (romanz) -* added ability to retrieve `WriteBatch` serialized size (romanz) -* added `set_options` method to `DB` to allow changing options without closing and re-opening the database (romanz) - - -## 0.10.1 (2018-07-17) - -* bump bindgen to 0.37 (ekmartin) -* bump rocksdb to 5.14.2 (ekmartin) -* add disable_cache to block-based options (ekmartin) -* add set_wal_dir (ekmartin) -* add set_memtable_prefix_bloom_ratio (ekmartin) -* add MemtableFactory support (ekmartin) -* add full_iterator (ekmartin) -* allow index type specification on block options (ekmartin) -* fix windows build (iSynaptic) - -## 0.10.0 (2018-03-17) - -* Bump rocksdb to 5.11.3 (spacejam) - -### New Features - -* Link with system rocksdb and snappy libs through envvars (ozkriff) - -### Breaking Changes - -* Fix reverse iteration from a given key (ongardie) - -## 0.9.1 (2018-02-10) - -### New Features - -* SliceTransform support (spacejam) - -## 0.9.0 (2018-02-10) - -### New Features - -* Allow creating iterators over prefixes (glittershark) - -### Breaking Changes - -* Open cfs with options (garyttierney, rrichardson) -* Non-Associative merge ops (rrichardson) - -## 0.8.3 (2018-02-10) - -* Bump rocksdb to 5.10.2 (ongardie) -* Add Send marker to Options (iSynaptic) -* Expose advise_random_on_open option (ongardie) - -## 0.8.2 (2017-12-28) - -* Bump rocksdb to 5.7.1 (jquesnelle) - -## 0.8.1 (2017-09-08) - -* Added list_cf (jeizsm) - -## 0.8.0 (2017-09-02) - -* Removed set_disable_data_sync (glittershark) - -## 0.7.2 (2017-09-02) - -* Bumped rocksdb to 5.6.2 (spacejam) - -## 0.7.1 (2017-08-29) - -* Bumped rocksdb to 5.6.1 (vmx) - -## 0.7 (2017-07-26) - -### Breaking Changes - -* Bumped rocksdb to 5.4.6 (derekdreery) -* Remove `use_direct_writes` now that `use_direct_io_for_flush_and_compaction` exists (derekdreery) - -### New Features - -* ReadOptions is now public (rschmukler) -* Implement Clone and AsRef for Error (daboross) -* Support for `seek_for_prev` (kaedroho) -* Support for DirectIO (kaedroho) - -### Internal Cleanups - -* Fixed race condition in tests (debris) -* 
Move tests to the default `tests` directory (vmx) - -## 0.6.1 (2017-03-13) - -### New Features - -* Support for raw iterator access (kaedroho) - -## 0.6 (2016-12-18) - -### Breaking Changes - -* Comparator function now returns an Ordering (alexreg) - -### New Features - -* Compaction filter (tmccombs) -* Support for backups (alexreg) - -0.5 (2016-11-20) - -### Breaking changes - -* No more Writable trait, as WriteBatch is not thread-safe as a DB (spacejam) -* All imports of `rocksdb::rocksdb::*` should now be simply `rocksdb::*` (alexreg) -* All errors changed to use a new `rocksdb::Error` type (kaedroho, alexreg) -* Removed `Options.set_filter_deletes` as it was removed in RocksDB (kaedroho) -* Renamed `add_merge_operator` to `set_merge_operator` and `add_comparator` to `set_comparator` (kaedroho) - -### New Features - -* Windows support (development by jsgf and arkpar. ported by kaedroho) -* The RocksDB library is now built at crate compile-time and statically linked with the resulting binary (development by jsgf and arkpar. ported by kaedroho) -* Cleaned up and improved coverage and tests of the ffi module (alexreg) -* Added many new methods to the `Options` type (development by ngaut, BusyJay, zhangjinpeng1987, siddontang and hhkbp2. ported by kaedroho) -* Added `len` and `is_empty` methods to `WriteBatch` (development by siddontang. ported by kaedroho) -* Added `path` mathod to `DB` (development by siddontang. ported by kaedroho) -* `DB::open` now accepts any type that implements `Into` as the path argument (kaedroho) -* `DB` now implements the `Debug` trait (kaedroho) -* Add iterator_cf to snapshot (jezell) -* Changelog started diff --git a/librocksdb-sys/rocksdb/CMakeLists.txt b/CMakeLists.txt similarity index 100% rename from librocksdb-sys/rocksdb/CMakeLists.txt rename to CMakeLists.txt diff --git a/librocksdb-sys/rocksdb/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md similarity index 100% rename from librocksdb-sys/rocksdb/CODE_OF_CONDUCT.md rename to CODE_OF_CONDUCT.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b4d3d41..190100b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,48 +1,17 @@ -# Contributing to rust-rocksdb -Thank you for taking an interest in the project, and contributing to it - it's appreciated! There are several ways you can contribute: -- [Bug Reports](#bug-reports) -- [Feature Requests](#feature-requests) -- [Documentation](#documentation) -- [Discussion](#discussion) -- [Pull Requests](#pull-requests) +# Contributing to RocksDB -**Please note all contributors must adhere to the [code of conduct](code-of-conduct.md).** +## Code of Conduct +The code of conduct is described in [`CODE_OF_CONDUCT.md`](CODE_OF_CONDUCT.md) -## Bug Reports -[bug-reports]: #bug-reports -- **Ensure the bug has not already been reported** - this can be done with a quick search of the [existing open issues](https://github.com/rust-rocksdb/rust-rocksdb/issues?q=is%3Aissue+is%3Aopen+). -- **Ensure the bug applies to the Rust wrapper, and not the underlying library** - bugs in the RocksDB library should be [reported upstream](https://github.com/facebook/rocksdb/issues). -- When [creating an issue](https://github.com/rust-rocksdb/rust-rocksdb/issues/new) please try to: - - **Use a clear and descriptive title** to identify the issue - - **Provide enough context** to acurately summarize the issue. Not every issue will need detailed steps to recreate, example code, stack traces, etc. - use your own judgment on what information would be helpful to anyone working on the issue. 
It's easier for someone to skim over too much context, than stop and wait for a response when context is missing. +## Contributor License Agreement ("CLA") -## Feature Requests -[feature-requests]: #feature-requests -Feature requests will primarily come in the form of ergonomics involving the Rust language, or in bringing the wrapper into parity with the library's API. Please create an issue with any relevant information. +In order to accept your pull request, we need you to submit a CLA. You +only need to do this once, so if you've done this for another Facebook +open source project, you're good to go. If you are submitting a pull +request for the first time, just let us know that you have completed +the CLA and we can cross-check with your GitHub username. -## Documentation -[documentation]: #documentation -Much of the documentation should mirror or reference the library's [documentation](https://github.com/facebook/rocksdb/wiki). If the wrapper or its exposed functions are missing documentation or contain inaccurate information please submit a pull request. - -## Discussion -[discussion]: #discussion -Discussion around design and development of the wrapper primarily occurs within issues and pull requests. Don't be afraid to participate if you have questions, concerns, insight, or advice. - -## Pull Requests -[pull-requests]: #pull-requests -Pull requests are welcome, and when contributing code, the author agrees to do so under the project's [licensing](https://github.com/rust-rocksdb/rust-rocksdb/blob/master/LICENSE) - Apache 2.0 as of the time of this writing. The maintainers greatly appreciate PRs that follow open-source contribution best practices: -1. Fork this repository to your personal GitHub account. -1. Create a branch that includes your changes, **keep changes isolated and granular**. -1. Include any relevant documentation and/or tests. Write [documentation tests](https://doc.rust-lang.org/rustdoc/documentation-tests.html) when relevant. -1. Apply `cargo fmt` to ensure consistent formatting. -1. [Create a pull request](https://help.github.com/en/articles/about-pull-requests) against this repository. - -For pull requests that would benefit from discussion and review earlier in the development process, use a [Draft Pull Request](https://help.github.com/en/articles/about-pull-requests#draft-pull-requests). - -## Additional Resources -Some useful information for working with RocksDB in Rust: -- [RocksDB library primary site](https://rocksdb.org) -- [RocksDB library GitHub repository](https://github.com/facebook/rocksdb) -- [RocksDB library documentation](https://github.com/facebook/rocksdb/wiki) -- [Rust's Foreign Function Interface (ffi)](https://doc.rust-lang.org/nomicon/ffi.html) +Complete your CLA here: +If you prefer to sign a paper copy, we can send you a PDF. Send us an +e-mail or create a new github issue to request the CLA in PDF format. 
diff --git a/librocksdb-sys/lz4/examples/COPYING b/COPYING similarity index 100% rename from librocksdb-sys/lz4/examples/COPYING rename to COPYING diff --git a/Cargo.toml b/Cargo.toml deleted file mode 100644 index bf0a5b8..0000000 --- a/Cargo.toml +++ /dev/null @@ -1,47 +0,0 @@ -[package] -name = "rocksdb" -description = "Rust wrapper for Facebook's RocksDB embeddable database" -version = "0.21.0" -edition = "2018" -rust-version = "1.60" -authors = ["Tyler Neely ", "David Greenberg "] -repository = "https://git.nextgraph.org/NextGraph/rust-rocksdb" -license = "Apache-2.0" -categories = [ "database" ] -keywords = ["database", "embedded", "LSM-tree", "persistence"] -homepage = "https://git.nextgraph.org/NextGraph/rust-rocksdb" -exclude = [ - ".gitignore", - ".travis.yml", - "deploy.sh", - "test/**/*", -] - -[workspace] -members = ["librocksdb-sys"] - -[features] -default = [] -jemalloc = ["librocksdb-sys/jemalloc"] -io-uring = ["librocksdb-sys/io-uring"] -valgrind = [] -snappy = ["librocksdb-sys/snappy"] -lz4 = ["librocksdb-sys/lz4"] -zstd = ["librocksdb-sys/zstd"] -zlib = ["librocksdb-sys/zlib"] -bzip2 = ["librocksdb-sys/bzip2"] -rtti = ["librocksdb-sys/rtti"] -multi-threaded-cf = [] -serde1 = ["serde"] - -[dependencies] -libc = "0.2" -librocksdb-sys = { path = "librocksdb-sys", version = "0.11.0" } -serde = { version = "1", features = [ "derive" ], optional = true } - -[dev-dependencies] -trybuild = "1.0" -tempfile = "3.1" -pretty_assertions = "1.0" -bincode = "1.3" -serde = { version = "1", features = [ "derive" ] } diff --git a/librocksdb-sys/rocksdb/DEFAULT_OPTIONS_HISTORY.md b/DEFAULT_OPTIONS_HISTORY.md similarity index 100% rename from librocksdb-sys/rocksdb/DEFAULT_OPTIONS_HISTORY.md rename to DEFAULT_OPTIONS_HISTORY.md diff --git a/librocksdb-sys/rocksdb/DUMP_FORMAT.md b/DUMP_FORMAT.md similarity index 100% rename from librocksdb-sys/rocksdb/DUMP_FORMAT.md rename to DUMP_FORMAT.md diff --git a/librocksdb-sys/rocksdb/HISTORY.md b/HISTORY.md similarity index 100% rename from librocksdb-sys/rocksdb/HISTORY.md rename to HISTORY.md diff --git a/librocksdb-sys/rocksdb/INSTALL.md b/INSTALL.md similarity index 100% rename from librocksdb-sys/rocksdb/INSTALL.md rename to INSTALL.md diff --git a/librocksdb-sys/rocksdb/LANGUAGE-BINDINGS.md b/LANGUAGE-BINDINGS.md similarity index 100% rename from librocksdb-sys/rocksdb/LANGUAGE-BINDINGS.md rename to LANGUAGE-BINDINGS.md diff --git a/LICENSE b/LICENSE deleted file mode 100644 index d645695..0000000 --- a/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/librocksdb-sys/rocksdb/LICENSE.Apache b/LICENSE.Apache
similarity index 100%
rename from librocksdb-sys/rocksdb/LICENSE.Apache
rename to LICENSE.Apache
diff --git a/librocksdb-sys/rocksdb/LICENSE.leveldb b/LICENSE.leveldb
similarity index 100%
rename from librocksdb-sys/rocksdb/LICENSE.leveldb
rename to LICENSE.leveldb
diff --git a/MAINTAINERSHIP.md b/MAINTAINERSHIP.md
deleted file mode 100644
index af038c4..0000000
--- a/MAINTAINERSHIP.md
+++ /dev/null
@@ -1,43 +0,0 @@
-Maintainers agree to operate under this set of guidelines:
-
-#### Authority
-
-Maintainers are trusted to close issues, merge pull requests, and publish crates to cargo.
-
-#### Categories of Work
-
-0. Minor
-   * updating the changelog
-   * requires no approval
-1. Normal
-   * librocksdb-sys updates
-   * API tracking code in the rocksdb crate that does not change control flow
-   * breaking changes due to removed functionality in rocksdb
-   * require 1 approval from another maintainer. if no maintainer is able to be reached for 2 weeks, then progress may be made anyway
-   * patch (and post-1.0, minor) releases to crates.io that contain only the above work
-2. Major
-   * breaking API changes that are not direct consequences of underlying rocksdb changes
-   * refactoring, which should generally only be done for clearly functional reasons like to aid in the completion of a specific task
-   * require consensus among all maintainers unless 2 weeks have gone by without full participation
-   * if 2 weeks have gone by after seeking feedback, and at least one other maintainer has participated, and all participating maintainers are in agreement, then progress may be made anyway
-   * if action is absolutely urgent, an organization owner may act as a tiebreaker if specifically requested to do so and they agree that making a controversial decision is worth the risk. This should hopefully never occur.
-
-If any maintainer thinks an issue is major, it is major.
-
-#### Changelog Maintenance
-
-* If you are the one who merges a PR that includes an externally-visible change, please describe the change in the changelog and merge it in.
-
-#### Releasing, Publishing
-
-* Releases adhere to [semver](https://semver.org/)
-* To cut a release, an issue should be opened for it and reach the required approval based on the above `Categories of Work` section above
-* When progress is possible, the issue may be closed and the proposer may publish to crates.io. This is controlled by those in the [crate publishers organization-level team](https://github.com/orgs/rust-rocksdb/teams/crate-publishers).
-* Releases should have an associated tag pushed to this repo. I recommend doing this after the publish to crates.io succeeds to prevent any mishaps around pushing a tag for something that can't actually be published.
-* The changelog serves as a sort of logical staging area for releases
-* If a breaking API change happens, and the changelog has not advanced to a new major version, we roll the changelog to a new major version and open an issue to release the previous patch (and post-1.0, minor) version.
-* Before rolling to a new major version, it would be nice to release a non-breaking point release to let current users silently take advantage of any improvements
-
-#### Becoming a Maintainer
-
-* If you have a history of participation in this repo, agree to these rules, and wish to take on maintainership responsibilities, you may open an issue. If an owner agrees, they will add you to the maintainer group and the crate publishers team.
diff --git a/librocksdb-sys/rocksdb/Makefile b/Makefile similarity index 100% rename from librocksdb-sys/rocksdb/Makefile rename to Makefile diff --git a/librocksdb-sys/rocksdb/PLUGINS.md b/PLUGINS.md similarity index 100% rename from librocksdb-sys/rocksdb/PLUGINS.md rename to PLUGINS.md diff --git a/README.md b/README.md index 6ad1837..8fcc4ab 100644 --- a/README.md +++ b/README.md @@ -1,61 +1,29 @@ -# rust-rocksdb +## RocksDB: A Persistent Key-Value Store for Flash and RAM Storage -![RocksDB build](https://github.com/rust-rocksdb/rust-rocksdb/workflows/RocksDB%20build/badge.svg?branch=master) -[![crates.io](https://img.shields.io/crates/v/rocksdb.svg)](https://crates.io/crates/rocksdb) -[![documentation](https://docs.rs/rocksdb/badge.svg)](https://docs.rs/rocksdb) -[![license](https://img.shields.io/crates/l/rocksdb.svg)](https://github.com/rust-rocksdb/rust-rocksdb/blob/master/LICENSE) -[![Gitter chat](https://badges.gitter.im/rust-rocksdb/gitter.png)](https://gitter.im/rust-rocksdb/lobby) -![rust 1.60.0 required](https://img.shields.io/badge/rust-1.60.0-blue.svg?label=MSRV) +[![CircleCI Status](https://circleci.com/gh/facebook/rocksdb.svg?style=svg)](https://circleci.com/gh/facebook/rocksdb) -![GitHub commits (since latest release)](https://img.shields.io/github/commits-since/rust-rocksdb/rust-rocksdb/latest.svg) +RocksDB is developed and maintained by Facebook Database Engineering Team. +It is built on earlier work on [LevelDB](https://github.com/google/leveldb) by Sanjay Ghemawat (sanjay@google.com) +and Jeff Dean (jeff@google.com) -## Requirements +This code is a library that forms the core building block for a fast +key-value server, especially suited for storing data on flash drives. +It has a Log-Structured-Merge-Database (LSM) design with flexible tradeoffs +between Write-Amplification-Factor (WAF), Read-Amplification-Factor (RAF) +and Space-Amplification-Factor (SAF). It has multi-threaded compactions, +making it especially suitable for storing multiple terabytes of data in a +single database. -- Clang and LLVM +Start with example usage here: https://github.com/facebook/rocksdb/tree/main/examples -### On OpenBSD +See the [github wiki](https://github.com/facebook/rocksdb/wiki) for more explanation. -``` -pkg_add llvm -``` +The public interface is in `include/`. Callers should not include or +rely on the details of any other header files in this package. Those +internal APIs may be changed without warning. -### On macos +Questions and discussions are welcome on the [RocksDB Developers Public](https://www.facebook.com/groups/rocksdb.dev/) Facebook group and [email list](https://groups.google.com/g/rocksdb) on Google Groups. -``` -port install clang -``` +## License -### On windows - -download from [here](https://github.com/llvm/llvm-project/releases/download/llvmorg-16.0.0/LLVM-16.0.0-win64.exe) - -## Contributing - -Feedback and pull requests welcome! If a particular feature of RocksDB is -important to you, please let me know by opening an issue, and I'll -prioritize it. - -## Compression Support - -By default, support for the [Snappy](https://github.com/google/snappy), -[LZ4](https://github.com/lz4/lz4), [Zstd](https://github.com/facebook/zstd), -[Zlib](https://zlib.net), and [Bzip2](http://www.bzip.org) compression -is enabled through crate features. If support for all of these compression -algorithms is not needed, default features can be disabled and specific -compression algorithms can be enabled. 
For example, to enable only LZ4 -compression support, make these changes to your Cargo.toml: - -``` -[dependencies.rocksdb] -default-features = false -features = ["lz4"] -``` - -## Multithreaded ColumnFamily alternation - -The underlying RocksDB does allow column families to be created and dropped -from multiple threads concurrently. But this crate doesn't allow it by default -for compatibility. If you need to modify column families concurrently, enable -crate feature called `multi-threaded-cf`, which makes this binding's -data structures to use RwLock by default. Alternatively, you can directly create -`DBWithThreadMode` without enabling the crate feature. +RocksDB is dual-licensed under both the GPLv2 (found in the COPYING file in the root directory) and Apache 2.0 License (found in the LICENSE.Apache file in the root directory). You may select, at your option, one of the above-listed licenses. diff --git a/librocksdb-sys/rocksdb/TARGETS b/TARGETS similarity index 100% rename from librocksdb-sys/rocksdb/TARGETS rename to TARGETS diff --git a/librocksdb-sys/rocksdb/USERS.md b/USERS.md similarity index 100% rename from librocksdb-sys/rocksdb/USERS.md rename to USERS.md diff --git a/librocksdb-sys/rocksdb/Vagrantfile b/Vagrantfile similarity index 100% rename from librocksdb-sys/rocksdb/Vagrantfile rename to Vagrantfile diff --git a/librocksdb-sys/rocksdb/WINDOWS_PORT.md b/WINDOWS_PORT.md similarity index 100% rename from librocksdb-sys/rocksdb/WINDOWS_PORT.md rename to WINDOWS_PORT.md diff --git a/librocksdb-sys/rocksdb/buckifier/bench-slow.json b/buckifier/bench-slow.json similarity index 100% rename from librocksdb-sys/rocksdb/buckifier/bench-slow.json rename to buckifier/bench-slow.json diff --git a/librocksdb-sys/rocksdb/buckifier/bench.json b/buckifier/bench.json similarity index 100% rename from librocksdb-sys/rocksdb/buckifier/bench.json rename to buckifier/bench.json diff --git a/librocksdb-sys/rocksdb/buckifier/buckify_rocksdb.py b/buckifier/buckify_rocksdb.py similarity index 100% rename from librocksdb-sys/rocksdb/buckifier/buckify_rocksdb.py rename to buckifier/buckify_rocksdb.py diff --git a/librocksdb-sys/rocksdb/buckifier/check_buck_targets.sh b/buckifier/check_buck_targets.sh similarity index 100% rename from librocksdb-sys/rocksdb/buckifier/check_buck_targets.sh rename to buckifier/check_buck_targets.sh diff --git a/librocksdb-sys/rocksdb/buckifier/rocks_test_runner.sh b/buckifier/rocks_test_runner.sh similarity index 100% rename from librocksdb-sys/rocksdb/buckifier/rocks_test_runner.sh rename to buckifier/rocks_test_runner.sh diff --git a/librocksdb-sys/rocksdb/buckifier/targets_builder.py b/buckifier/targets_builder.py similarity index 100% rename from librocksdb-sys/rocksdb/buckifier/targets_builder.py rename to buckifier/targets_builder.py diff --git a/librocksdb-sys/rocksdb/buckifier/targets_cfg.py b/buckifier/targets_cfg.py similarity index 100% rename from librocksdb-sys/rocksdb/buckifier/targets_cfg.py rename to buckifier/targets_cfg.py diff --git a/librocksdb-sys/rocksdb/buckifier/util.py b/buckifier/util.py similarity index 100% rename from librocksdb-sys/rocksdb/buckifier/util.py rename to buckifier/util.py diff --git a/librocksdb-sys/rocksdb/build_tools/amalgamate.py b/build_tools/amalgamate.py similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/amalgamate.py rename to build_tools/amalgamate.py diff --git a/librocksdb-sys/rocksdb/build_tools/benchmark_log_tool.py b/build_tools/benchmark_log_tool.py similarity index 100% rename from 
librocksdb-sys/rocksdb/build_tools/benchmark_log_tool.py rename to build_tools/benchmark_log_tool.py diff --git a/librocksdb-sys/rocksdb/build_tools/build_detect_platform b/build_tools/build_detect_platform similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/build_detect_platform rename to build_tools/build_detect_platform diff --git a/librocksdb-sys/rocksdb/build_tools/check-sources.sh b/build_tools/check-sources.sh similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/check-sources.sh rename to build_tools/check-sources.sh diff --git a/librocksdb-sys/rocksdb/build_tools/dependencies_platform010.sh b/build_tools/dependencies_platform010.sh similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/dependencies_platform010.sh rename to build_tools/dependencies_platform010.sh diff --git a/librocksdb-sys/rocksdb/build_tools/dockerbuild.sh b/build_tools/dockerbuild.sh similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/dockerbuild.sh rename to build_tools/dockerbuild.sh diff --git a/librocksdb-sys/rocksdb/build_tools/error_filter.py b/build_tools/error_filter.py similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/error_filter.py rename to build_tools/error_filter.py diff --git a/librocksdb-sys/rocksdb/build_tools/fb_compile_mongo.sh b/build_tools/fb_compile_mongo.sh similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/fb_compile_mongo.sh rename to build_tools/fb_compile_mongo.sh diff --git a/librocksdb-sys/rocksdb/build_tools/fbcode_config.sh b/build_tools/fbcode_config.sh similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/fbcode_config.sh rename to build_tools/fbcode_config.sh diff --git a/librocksdb-sys/rocksdb/build_tools/fbcode_config_platform010.sh b/build_tools/fbcode_config_platform010.sh similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/fbcode_config_platform010.sh rename to build_tools/fbcode_config_platform010.sh diff --git a/librocksdb-sys/rocksdb/build_tools/format-diff.sh b/build_tools/format-diff.sh similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/format-diff.sh rename to build_tools/format-diff.sh diff --git a/librocksdb-sys/rocksdb/build_tools/gnu_parallel b/build_tools/gnu_parallel similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/gnu_parallel rename to build_tools/gnu_parallel diff --git a/librocksdb-sys/rocksdb/build_tools/make_package.sh b/build_tools/make_package.sh similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/make_package.sh rename to build_tools/make_package.sh diff --git a/librocksdb-sys/rocksdb/build_tools/ps_with_stack b/build_tools/ps_with_stack similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/ps_with_stack rename to build_tools/ps_with_stack diff --git a/librocksdb-sys/rocksdb/build_tools/regression_build_test.sh b/build_tools/regression_build_test.sh similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/regression_build_test.sh rename to build_tools/regression_build_test.sh diff --git a/librocksdb-sys/rocksdb/build_tools/run_ci_db_test.ps1 b/build_tools/run_ci_db_test.ps1 similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/run_ci_db_test.ps1 rename to build_tools/run_ci_db_test.ps1 diff --git a/librocksdb-sys/rocksdb/build_tools/setup_centos7.sh b/build_tools/setup_centos7.sh similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/setup_centos7.sh rename to build_tools/setup_centos7.sh diff --git 
a/librocksdb-sys/rocksdb/build_tools/ubuntu20_image/Dockerfile b/build_tools/ubuntu20_image/Dockerfile similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/ubuntu20_image/Dockerfile rename to build_tools/ubuntu20_image/Dockerfile diff --git a/librocksdb-sys/rocksdb/build_tools/update_dependencies.sh b/build_tools/update_dependencies.sh similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/update_dependencies.sh rename to build_tools/update_dependencies.sh diff --git a/librocksdb-sys/rocksdb/build_tools/version.sh b/build_tools/version.sh similarity index 100% rename from librocksdb-sys/rocksdb/build_tools/version.sh rename to build_tools/version.sh diff --git a/librocksdb-sys/rocksdb/cache/cache.cc b/cache/cache.cc similarity index 100% rename from librocksdb-sys/rocksdb/cache/cache.cc rename to cache/cache.cc diff --git a/librocksdb-sys/rocksdb/cache/cache_bench.cc b/cache/cache_bench.cc similarity index 100% rename from librocksdb-sys/rocksdb/cache/cache_bench.cc rename to cache/cache_bench.cc diff --git a/librocksdb-sys/rocksdb/cache/cache_bench_tool.cc b/cache/cache_bench_tool.cc similarity index 100% rename from librocksdb-sys/rocksdb/cache/cache_bench_tool.cc rename to cache/cache_bench_tool.cc diff --git a/librocksdb-sys/rocksdb/cache/cache_entry_roles.cc b/cache/cache_entry_roles.cc similarity index 100% rename from librocksdb-sys/rocksdb/cache/cache_entry_roles.cc rename to cache/cache_entry_roles.cc diff --git a/librocksdb-sys/rocksdb/cache/cache_entry_roles.h b/cache/cache_entry_roles.h similarity index 100% rename from librocksdb-sys/rocksdb/cache/cache_entry_roles.h rename to cache/cache_entry_roles.h diff --git a/librocksdb-sys/rocksdb/cache/cache_entry_stats.h b/cache/cache_entry_stats.h similarity index 100% rename from librocksdb-sys/rocksdb/cache/cache_entry_stats.h rename to cache/cache_entry_stats.h diff --git a/librocksdb-sys/rocksdb/cache/cache_helpers.cc b/cache/cache_helpers.cc similarity index 100% rename from librocksdb-sys/rocksdb/cache/cache_helpers.cc rename to cache/cache_helpers.cc diff --git a/librocksdb-sys/rocksdb/cache/cache_helpers.h b/cache/cache_helpers.h similarity index 100% rename from librocksdb-sys/rocksdb/cache/cache_helpers.h rename to cache/cache_helpers.h diff --git a/librocksdb-sys/rocksdb/cache/cache_key.cc b/cache/cache_key.cc similarity index 100% rename from librocksdb-sys/rocksdb/cache/cache_key.cc rename to cache/cache_key.cc diff --git a/librocksdb-sys/rocksdb/cache/cache_key.h b/cache/cache_key.h similarity index 100% rename from librocksdb-sys/rocksdb/cache/cache_key.h rename to cache/cache_key.h diff --git a/librocksdb-sys/rocksdb/cache/cache_reservation_manager.cc b/cache/cache_reservation_manager.cc similarity index 100% rename from librocksdb-sys/rocksdb/cache/cache_reservation_manager.cc rename to cache/cache_reservation_manager.cc diff --git a/librocksdb-sys/rocksdb/cache/cache_reservation_manager.h b/cache/cache_reservation_manager.h similarity index 100% rename from librocksdb-sys/rocksdb/cache/cache_reservation_manager.h rename to cache/cache_reservation_manager.h diff --git a/librocksdb-sys/rocksdb/cache/cache_reservation_manager_test.cc b/cache/cache_reservation_manager_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/cache/cache_reservation_manager_test.cc rename to cache/cache_reservation_manager_test.cc diff --git a/librocksdb-sys/rocksdb/cache/cache_test.cc b/cache/cache_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/cache/cache_test.cc rename to 
cache/cache_test.cc diff --git a/librocksdb-sys/rocksdb/cache/charged_cache.cc b/cache/charged_cache.cc similarity index 100% rename from librocksdb-sys/rocksdb/cache/charged_cache.cc rename to cache/charged_cache.cc diff --git a/librocksdb-sys/rocksdb/cache/charged_cache.h b/cache/charged_cache.h similarity index 100% rename from librocksdb-sys/rocksdb/cache/charged_cache.h rename to cache/charged_cache.h diff --git a/librocksdb-sys/rocksdb/cache/clock_cache.cc b/cache/clock_cache.cc similarity index 100% rename from librocksdb-sys/rocksdb/cache/clock_cache.cc rename to cache/clock_cache.cc diff --git a/librocksdb-sys/rocksdb/cache/clock_cache.h b/cache/clock_cache.h similarity index 100% rename from librocksdb-sys/rocksdb/cache/clock_cache.h rename to cache/clock_cache.h diff --git a/librocksdb-sys/rocksdb/cache/compressed_secondary_cache.cc b/cache/compressed_secondary_cache.cc similarity index 100% rename from librocksdb-sys/rocksdb/cache/compressed_secondary_cache.cc rename to cache/compressed_secondary_cache.cc diff --git a/librocksdb-sys/rocksdb/cache/compressed_secondary_cache.h b/cache/compressed_secondary_cache.h similarity index 100% rename from librocksdb-sys/rocksdb/cache/compressed_secondary_cache.h rename to cache/compressed_secondary_cache.h diff --git a/librocksdb-sys/rocksdb/cache/compressed_secondary_cache_test.cc b/cache/compressed_secondary_cache_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/cache/compressed_secondary_cache_test.cc rename to cache/compressed_secondary_cache_test.cc diff --git a/librocksdb-sys/rocksdb/cache/lru_cache.cc b/cache/lru_cache.cc similarity index 100% rename from librocksdb-sys/rocksdb/cache/lru_cache.cc rename to cache/lru_cache.cc diff --git a/librocksdb-sys/rocksdb/cache/lru_cache.h b/cache/lru_cache.h similarity index 100% rename from librocksdb-sys/rocksdb/cache/lru_cache.h rename to cache/lru_cache.h diff --git a/librocksdb-sys/rocksdb/cache/lru_cache_test.cc b/cache/lru_cache_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/cache/lru_cache_test.cc rename to cache/lru_cache_test.cc diff --git a/librocksdb-sys/rocksdb/cache/secondary_cache.cc b/cache/secondary_cache.cc similarity index 100% rename from librocksdb-sys/rocksdb/cache/secondary_cache.cc rename to cache/secondary_cache.cc diff --git a/librocksdb-sys/rocksdb/cache/secondary_cache_adapter.cc b/cache/secondary_cache_adapter.cc similarity index 100% rename from librocksdb-sys/rocksdb/cache/secondary_cache_adapter.cc rename to cache/secondary_cache_adapter.cc diff --git a/librocksdb-sys/rocksdb/cache/secondary_cache_adapter.h b/cache/secondary_cache_adapter.h similarity index 100% rename from librocksdb-sys/rocksdb/cache/secondary_cache_adapter.h rename to cache/secondary_cache_adapter.h diff --git a/librocksdb-sys/rocksdb/cache/sharded_cache.cc b/cache/sharded_cache.cc similarity index 100% rename from librocksdb-sys/rocksdb/cache/sharded_cache.cc rename to cache/sharded_cache.cc diff --git a/librocksdb-sys/rocksdb/cache/sharded_cache.h b/cache/sharded_cache.h similarity index 100% rename from librocksdb-sys/rocksdb/cache/sharded_cache.h rename to cache/sharded_cache.h diff --git a/librocksdb-sys/rocksdb/cache/typed_cache.h b/cache/typed_cache.h similarity index 100% rename from librocksdb-sys/rocksdb/cache/typed_cache.h rename to cache/typed_cache.h diff --git a/librocksdb-sys/rocksdb/cmake/RocksDBConfig.cmake.in b/cmake/RocksDBConfig.cmake.in similarity index 100% rename from librocksdb-sys/rocksdb/cmake/RocksDBConfig.cmake.in rename to 
cmake/RocksDBConfig.cmake.in diff --git a/librocksdb-sys/rocksdb/cmake/modules/CxxFlags.cmake b/cmake/modules/CxxFlags.cmake similarity index 100% rename from librocksdb-sys/rocksdb/cmake/modules/CxxFlags.cmake rename to cmake/modules/CxxFlags.cmake diff --git a/librocksdb-sys/rocksdb/cmake/modules/FindJeMalloc.cmake b/cmake/modules/FindJeMalloc.cmake similarity index 100% rename from librocksdb-sys/rocksdb/cmake/modules/FindJeMalloc.cmake rename to cmake/modules/FindJeMalloc.cmake diff --git a/librocksdb-sys/rocksdb/cmake/modules/FindNUMA.cmake b/cmake/modules/FindNUMA.cmake similarity index 100% rename from librocksdb-sys/rocksdb/cmake/modules/FindNUMA.cmake rename to cmake/modules/FindNUMA.cmake diff --git a/librocksdb-sys/rocksdb/cmake/modules/FindSnappy.cmake b/cmake/modules/FindSnappy.cmake similarity index 100% rename from librocksdb-sys/rocksdb/cmake/modules/FindSnappy.cmake rename to cmake/modules/FindSnappy.cmake diff --git a/librocksdb-sys/rocksdb/cmake/modules/FindTBB.cmake b/cmake/modules/FindTBB.cmake similarity index 100% rename from librocksdb-sys/rocksdb/cmake/modules/FindTBB.cmake rename to cmake/modules/FindTBB.cmake diff --git a/librocksdb-sys/rocksdb/cmake/modules/Findgflags.cmake b/cmake/modules/Findgflags.cmake similarity index 100% rename from librocksdb-sys/rocksdb/cmake/modules/Findgflags.cmake rename to cmake/modules/Findgflags.cmake diff --git a/librocksdb-sys/rocksdb/cmake/modules/Findlz4.cmake b/cmake/modules/Findlz4.cmake similarity index 100% rename from librocksdb-sys/rocksdb/cmake/modules/Findlz4.cmake rename to cmake/modules/Findlz4.cmake diff --git a/librocksdb-sys/rocksdb/cmake/modules/Finduring.cmake b/cmake/modules/Finduring.cmake similarity index 100% rename from librocksdb-sys/rocksdb/cmake/modules/Finduring.cmake rename to cmake/modules/Finduring.cmake diff --git a/librocksdb-sys/rocksdb/cmake/modules/Findzstd.cmake b/cmake/modules/Findzstd.cmake similarity index 100% rename from librocksdb-sys/rocksdb/cmake/modules/Findzstd.cmake rename to cmake/modules/Findzstd.cmake diff --git a/librocksdb-sys/rocksdb/cmake/modules/ReadVersion.cmake b/cmake/modules/ReadVersion.cmake similarity index 100% rename from librocksdb-sys/rocksdb/cmake/modules/ReadVersion.cmake rename to cmake/modules/ReadVersion.cmake diff --git a/code-of-conduct.md b/code-of-conduct.md deleted file mode 100644 index 6752d1f..0000000 --- a/code-of-conduct.md +++ /dev/null @@ -1,76 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, sex characteristics, gender identity and expression, -level of experience, education, socio-economic status, nationality, personal -appearance, race, religion, or sexual identity and orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at t@jujit.su. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see -https://www.contributor-covenant.org/faq diff --git a/librocksdb-sys/rocksdb/common.mk b/common.mk similarity index 100% rename from librocksdb-sys/rocksdb/common.mk rename to common.mk diff --git a/librocksdb-sys/rocksdb/coverage/coverage_test.sh b/coverage/coverage_test.sh similarity index 100% rename from librocksdb-sys/rocksdb/coverage/coverage_test.sh rename to coverage/coverage_test.sh diff --git a/librocksdb-sys/rocksdb/coverage/parse_gcov_output.py b/coverage/parse_gcov_output.py similarity index 100% rename from librocksdb-sys/rocksdb/coverage/parse_gcov_output.py rename to coverage/parse_gcov_output.py diff --git a/librocksdb-sys/rocksdb/crash_test.mk b/crash_test.mk similarity index 100% rename from librocksdb-sys/rocksdb/crash_test.mk rename to crash_test.mk diff --git a/librocksdb-sys/rocksdb/db/arena_wrapped_db_iter.cc b/db/arena_wrapped_db_iter.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/arena_wrapped_db_iter.cc rename to db/arena_wrapped_db_iter.cc diff --git a/librocksdb-sys/rocksdb/db/arena_wrapped_db_iter.h b/db/arena_wrapped_db_iter.h similarity index 100% rename from librocksdb-sys/rocksdb/db/arena_wrapped_db_iter.h rename to db/arena_wrapped_db_iter.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_constants.h b/db/blob/blob_constants.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_constants.h rename to db/blob/blob_constants.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_contents.cc b/db/blob/blob_contents.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_contents.cc rename to db/blob/blob_contents.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_contents.h b/db/blob/blob_contents.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_contents.h rename to db/blob/blob_contents.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_counting_iterator.h b/db/blob/blob_counting_iterator.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_counting_iterator.h rename to db/blob/blob_counting_iterator.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_counting_iterator_test.cc b/db/blob/blob_counting_iterator_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_counting_iterator_test.cc rename to db/blob/blob_counting_iterator_test.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_fetcher.cc b/db/blob/blob_fetcher.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_fetcher.cc rename to db/blob/blob_fetcher.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_fetcher.h b/db/blob/blob_fetcher.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_fetcher.h rename to db/blob/blob_fetcher.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_file_addition.cc b/db/blob/blob_file_addition.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_file_addition.cc rename to db/blob/blob_file_addition.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_file_addition.h b/db/blob/blob_file_addition.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_file_addition.h rename to db/blob/blob_file_addition.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_file_addition_test.cc 
b/db/blob/blob_file_addition_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_file_addition_test.cc rename to db/blob/blob_file_addition_test.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_file_builder.cc b/db/blob/blob_file_builder.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_file_builder.cc rename to db/blob/blob_file_builder.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_file_builder.h b/db/blob/blob_file_builder.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_file_builder.h rename to db/blob/blob_file_builder.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_file_builder_test.cc b/db/blob/blob_file_builder_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_file_builder_test.cc rename to db/blob/blob_file_builder_test.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_file_cache.cc b/db/blob/blob_file_cache.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_file_cache.cc rename to db/blob/blob_file_cache.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_file_cache.h b/db/blob/blob_file_cache.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_file_cache.h rename to db/blob/blob_file_cache.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_file_cache_test.cc b/db/blob/blob_file_cache_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_file_cache_test.cc rename to db/blob/blob_file_cache_test.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_file_completion_callback.h b/db/blob/blob_file_completion_callback.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_file_completion_callback.h rename to db/blob/blob_file_completion_callback.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_file_garbage.cc b/db/blob/blob_file_garbage.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_file_garbage.cc rename to db/blob/blob_file_garbage.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_file_garbage.h b/db/blob/blob_file_garbage.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_file_garbage.h rename to db/blob/blob_file_garbage.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_file_garbage_test.cc b/db/blob/blob_file_garbage_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_file_garbage_test.cc rename to db/blob/blob_file_garbage_test.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_file_meta.cc b/db/blob/blob_file_meta.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_file_meta.cc rename to db/blob/blob_file_meta.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_file_meta.h b/db/blob/blob_file_meta.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_file_meta.h rename to db/blob/blob_file_meta.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_file_reader.cc b/db/blob/blob_file_reader.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_file_reader.cc rename to db/blob/blob_file_reader.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_file_reader.h b/db/blob/blob_file_reader.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_file_reader.h rename to db/blob/blob_file_reader.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_file_reader_test.cc b/db/blob/blob_file_reader_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_file_reader_test.cc rename to db/blob/blob_file_reader_test.cc diff --git 
a/librocksdb-sys/rocksdb/db/blob/blob_garbage_meter.cc b/db/blob/blob_garbage_meter.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_garbage_meter.cc rename to db/blob/blob_garbage_meter.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_garbage_meter.h b/db/blob/blob_garbage_meter.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_garbage_meter.h rename to db/blob/blob_garbage_meter.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_garbage_meter_test.cc b/db/blob/blob_garbage_meter_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_garbage_meter_test.cc rename to db/blob/blob_garbage_meter_test.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_index.h b/db/blob/blob_index.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_index.h rename to db/blob/blob_index.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_log_format.cc b/db/blob/blob_log_format.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_log_format.cc rename to db/blob/blob_log_format.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_log_format.h b/db/blob/blob_log_format.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_log_format.h rename to db/blob/blob_log_format.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_log_sequential_reader.cc b/db/blob/blob_log_sequential_reader.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_log_sequential_reader.cc rename to db/blob/blob_log_sequential_reader.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_log_sequential_reader.h b/db/blob/blob_log_sequential_reader.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_log_sequential_reader.h rename to db/blob/blob_log_sequential_reader.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_log_writer.cc b/db/blob/blob_log_writer.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_log_writer.cc rename to db/blob/blob_log_writer.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_log_writer.h b/db/blob/blob_log_writer.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_log_writer.h rename to db/blob/blob_log_writer.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_read_request.h b/db/blob/blob_read_request.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_read_request.h rename to db/blob/blob_read_request.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_source.cc b/db/blob/blob_source.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_source.cc rename to db/blob/blob_source.cc diff --git a/librocksdb-sys/rocksdb/db/blob/blob_source.h b/db/blob/blob_source.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_source.h rename to db/blob/blob_source.h diff --git a/librocksdb-sys/rocksdb/db/blob/blob_source_test.cc b/db/blob/blob_source_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/blob_source_test.cc rename to db/blob/blob_source_test.cc diff --git a/librocksdb-sys/rocksdb/db/blob/db_blob_basic_test.cc b/db/blob/db_blob_basic_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/db_blob_basic_test.cc rename to db/blob/db_blob_basic_test.cc diff --git a/librocksdb-sys/rocksdb/db/blob/db_blob_compaction_test.cc b/db/blob/db_blob_compaction_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/db_blob_compaction_test.cc rename to db/blob/db_blob_compaction_test.cc diff --git 
a/librocksdb-sys/rocksdb/db/blob/db_blob_corruption_test.cc b/db/blob/db_blob_corruption_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/db_blob_corruption_test.cc rename to db/blob/db_blob_corruption_test.cc diff --git a/librocksdb-sys/rocksdb/db/blob/db_blob_index_test.cc b/db/blob/db_blob_index_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/db_blob_index_test.cc rename to db/blob/db_blob_index_test.cc diff --git a/librocksdb-sys/rocksdb/db/blob/prefetch_buffer_collection.cc b/db/blob/prefetch_buffer_collection.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/prefetch_buffer_collection.cc rename to db/blob/prefetch_buffer_collection.cc diff --git a/librocksdb-sys/rocksdb/db/blob/prefetch_buffer_collection.h b/db/blob/prefetch_buffer_collection.h similarity index 100% rename from librocksdb-sys/rocksdb/db/blob/prefetch_buffer_collection.h rename to db/blob/prefetch_buffer_collection.h diff --git a/librocksdb-sys/rocksdb/db/builder.cc b/db/builder.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/builder.cc rename to db/builder.cc diff --git a/librocksdb-sys/rocksdb/db/builder.h b/db/builder.h similarity index 100% rename from librocksdb-sys/rocksdb/db/builder.h rename to db/builder.h diff --git a/librocksdb-sys/rocksdb/db/c.cc b/db/c.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/c.cc rename to db/c.cc diff --git a/librocksdb-sys/rocksdb/db/c_test.c b/db/c_test.c similarity index 100% rename from librocksdb-sys/rocksdb/db/c_test.c rename to db/c_test.c diff --git a/librocksdb-sys/rocksdb/db/column_family.cc b/db/column_family.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/column_family.cc rename to db/column_family.cc diff --git a/librocksdb-sys/rocksdb/db/column_family.h b/db/column_family.h similarity index 100% rename from librocksdb-sys/rocksdb/db/column_family.h rename to db/column_family.h diff --git a/librocksdb-sys/rocksdb/db/column_family_test.cc b/db/column_family_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/column_family_test.cc rename to db/column_family_test.cc diff --git a/librocksdb-sys/rocksdb/db/compact_files_test.cc b/db/compact_files_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compact_files_test.cc rename to db/compact_files_test.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/clipping_iterator.h b/db/compaction/clipping_iterator.h similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/clipping_iterator.h rename to db/compaction/clipping_iterator.h diff --git a/librocksdb-sys/rocksdb/db/compaction/clipping_iterator_test.cc b/db/compaction/clipping_iterator_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/clipping_iterator_test.cc rename to db/compaction/clipping_iterator_test.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction.cc b/db/compaction/compaction.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction.cc rename to db/compaction/compaction.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction.h b/db/compaction/compaction.h similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction.h rename to db/compaction/compaction.h diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_iteration_stats.h b/db/compaction/compaction_iteration_stats.h similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_iteration_stats.h rename to 
db/compaction/compaction_iteration_stats.h diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_iterator.cc b/db/compaction/compaction_iterator.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_iterator.cc rename to db/compaction/compaction_iterator.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_iterator.h b/db/compaction/compaction_iterator.h similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_iterator.h rename to db/compaction/compaction_iterator.h diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_iterator_test.cc b/db/compaction/compaction_iterator_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_iterator_test.cc rename to db/compaction/compaction_iterator_test.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_job.cc b/db/compaction/compaction_job.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_job.cc rename to db/compaction/compaction_job.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_job.h b/db/compaction/compaction_job.h similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_job.h rename to db/compaction/compaction_job.h diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_job_stats_test.cc b/db/compaction/compaction_job_stats_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_job_stats_test.cc rename to db/compaction/compaction_job_stats_test.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_job_test.cc b/db/compaction/compaction_job_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_job_test.cc rename to db/compaction/compaction_job_test.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_outputs.cc b/db/compaction/compaction_outputs.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_outputs.cc rename to db/compaction/compaction_outputs.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_outputs.h b/db/compaction/compaction_outputs.h similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_outputs.h rename to db/compaction/compaction_outputs.h diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_picker.cc b/db/compaction/compaction_picker.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_picker.cc rename to db/compaction/compaction_picker.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_picker.h b/db/compaction/compaction_picker.h similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_picker.h rename to db/compaction/compaction_picker.h diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_picker_fifo.cc b/db/compaction/compaction_picker_fifo.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_picker_fifo.cc rename to db/compaction/compaction_picker_fifo.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_picker_fifo.h b/db/compaction/compaction_picker_fifo.h similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_picker_fifo.h rename to db/compaction/compaction_picker_fifo.h diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_picker_level.cc b/db/compaction/compaction_picker_level.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_picker_level.cc rename to 
db/compaction/compaction_picker_level.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_picker_level.h b/db/compaction/compaction_picker_level.h similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_picker_level.h rename to db/compaction/compaction_picker_level.h diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_picker_test.cc b/db/compaction/compaction_picker_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_picker_test.cc rename to db/compaction/compaction_picker_test.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_picker_universal.cc b/db/compaction/compaction_picker_universal.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_picker_universal.cc rename to db/compaction/compaction_picker_universal.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_picker_universal.h b/db/compaction/compaction_picker_universal.h similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_picker_universal.h rename to db/compaction/compaction_picker_universal.h diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_service_job.cc b/db/compaction/compaction_service_job.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_service_job.cc rename to db/compaction/compaction_service_job.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_service_test.cc b/db/compaction/compaction_service_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_service_test.cc rename to db/compaction/compaction_service_test.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_state.cc b/db/compaction/compaction_state.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_state.cc rename to db/compaction/compaction_state.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/compaction_state.h b/db/compaction/compaction_state.h similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/compaction_state.h rename to db/compaction/compaction_state.h diff --git a/librocksdb-sys/rocksdb/db/compaction/file_pri.h b/db/compaction/file_pri.h similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/file_pri.h rename to db/compaction/file_pri.h diff --git a/librocksdb-sys/rocksdb/db/compaction/sst_partitioner.cc b/db/compaction/sst_partitioner.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/sst_partitioner.cc rename to db/compaction/sst_partitioner.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/subcompaction_state.cc b/db/compaction/subcompaction_state.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/subcompaction_state.cc rename to db/compaction/subcompaction_state.cc diff --git a/librocksdb-sys/rocksdb/db/compaction/subcompaction_state.h b/db/compaction/subcompaction_state.h similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/subcompaction_state.h rename to db/compaction/subcompaction_state.h diff --git a/librocksdb-sys/rocksdb/db/compaction/tiered_compaction_test.cc b/db/compaction/tiered_compaction_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/compaction/tiered_compaction_test.cc rename to db/compaction/tiered_compaction_test.cc diff --git a/librocksdb-sys/rocksdb/db/comparator_db_test.cc b/db/comparator_db_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/comparator_db_test.cc rename to 
db/comparator_db_test.cc diff --git a/librocksdb-sys/rocksdb/db/convenience.cc b/db/convenience.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/convenience.cc rename to db/convenience.cc diff --git a/librocksdb-sys/rocksdb/db/corruption_test.cc b/db/corruption_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/corruption_test.cc rename to db/corruption_test.cc diff --git a/librocksdb-sys/rocksdb/db/cuckoo_table_db_test.cc b/db/cuckoo_table_db_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/cuckoo_table_db_test.cc rename to db/cuckoo_table_db_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_basic_test.cc b/db/db_basic_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_basic_test.cc rename to db/db_basic_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_block_cache_test.cc b/db/db_block_cache_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_block_cache_test.cc rename to db/db_block_cache_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_bloom_filter_test.cc b/db/db_bloom_filter_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_bloom_filter_test.cc rename to db/db_bloom_filter_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_clip_test.cc b/db/db_clip_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_clip_test.cc rename to db/db_clip_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_compaction_filter_test.cc b/db/db_compaction_filter_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_compaction_filter_test.cc rename to db/db_compaction_filter_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_compaction_test.cc b/db/db_compaction_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_compaction_test.cc rename to db/db_compaction_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_dynamic_level_test.cc b/db/db_dynamic_level_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_dynamic_level_test.cc rename to db/db_dynamic_level_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_encryption_test.cc b/db/db_encryption_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_encryption_test.cc rename to db/db_encryption_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_filesnapshot.cc b/db/db_filesnapshot.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_filesnapshot.cc rename to db/db_filesnapshot.cc diff --git a/librocksdb-sys/rocksdb/db/db_flush_test.cc b/db/db_flush_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_flush_test.cc rename to db/db_flush_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_impl/compacted_db_impl.cc b/db/db_impl/compacted_db_impl.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_impl/compacted_db_impl.cc rename to db/db_impl/compacted_db_impl.cc diff --git a/librocksdb-sys/rocksdb/db/db_impl/compacted_db_impl.h b/db/db_impl/compacted_db_impl.h similarity index 100% rename from librocksdb-sys/rocksdb/db/db_impl/compacted_db_impl.h rename to db/db_impl/compacted_db_impl.h diff --git a/librocksdb-sys/rocksdb/db/db_impl/db_impl.cc b/db/db_impl/db_impl.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_impl/db_impl.cc rename to db/db_impl/db_impl.cc diff --git a/librocksdb-sys/rocksdb/db/db_impl/db_impl.h b/db/db_impl/db_impl.h similarity index 100% rename from librocksdb-sys/rocksdb/db/db_impl/db_impl.h rename to db/db_impl/db_impl.h diff --git a/librocksdb-sys/rocksdb/db/db_impl/db_impl_compaction_flush.cc 
b/db/db_impl/db_impl_compaction_flush.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_impl/db_impl_compaction_flush.cc rename to db/db_impl/db_impl_compaction_flush.cc diff --git a/librocksdb-sys/rocksdb/db/db_impl/db_impl_debug.cc b/db/db_impl/db_impl_debug.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_impl/db_impl_debug.cc rename to db/db_impl/db_impl_debug.cc diff --git a/librocksdb-sys/rocksdb/db/db_impl/db_impl_experimental.cc b/db/db_impl/db_impl_experimental.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_impl/db_impl_experimental.cc rename to db/db_impl/db_impl_experimental.cc diff --git a/librocksdb-sys/rocksdb/db/db_impl/db_impl_files.cc b/db/db_impl/db_impl_files.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_impl/db_impl_files.cc rename to db/db_impl/db_impl_files.cc diff --git a/librocksdb-sys/rocksdb/db/db_impl/db_impl_open.cc b/db/db_impl/db_impl_open.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_impl/db_impl_open.cc rename to db/db_impl/db_impl_open.cc diff --git a/librocksdb-sys/rocksdb/db/db_impl/db_impl_readonly.cc b/db/db_impl/db_impl_readonly.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_impl/db_impl_readonly.cc rename to db/db_impl/db_impl_readonly.cc diff --git a/librocksdb-sys/rocksdb/db/db_impl/db_impl_readonly.h b/db/db_impl/db_impl_readonly.h similarity index 100% rename from librocksdb-sys/rocksdb/db/db_impl/db_impl_readonly.h rename to db/db_impl/db_impl_readonly.h diff --git a/librocksdb-sys/rocksdb/db/db_impl/db_impl_secondary.cc b/db/db_impl/db_impl_secondary.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_impl/db_impl_secondary.cc rename to db/db_impl/db_impl_secondary.cc diff --git a/librocksdb-sys/rocksdb/db/db_impl/db_impl_secondary.h b/db/db_impl/db_impl_secondary.h similarity index 100% rename from librocksdb-sys/rocksdb/db/db_impl/db_impl_secondary.h rename to db/db_impl/db_impl_secondary.h diff --git a/librocksdb-sys/rocksdb/db/db_impl/db_impl_write.cc b/db/db_impl/db_impl_write.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_impl/db_impl_write.cc rename to db/db_impl/db_impl_write.cc diff --git a/librocksdb-sys/rocksdb/db/db_info_dumper.cc b/db/db_info_dumper.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_info_dumper.cc rename to db/db_info_dumper.cc diff --git a/librocksdb-sys/rocksdb/db/db_info_dumper.h b/db/db_info_dumper.h similarity index 100% rename from librocksdb-sys/rocksdb/db/db_info_dumper.h rename to db/db_info_dumper.h diff --git a/librocksdb-sys/rocksdb/db/db_inplace_update_test.cc b/db/db_inplace_update_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_inplace_update_test.cc rename to db/db_inplace_update_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_io_failure_test.cc b/db/db_io_failure_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_io_failure_test.cc rename to db/db_io_failure_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_iter.cc b/db/db_iter.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_iter.cc rename to db/db_iter.cc diff --git a/librocksdb-sys/rocksdb/db/db_iter.h b/db/db_iter.h similarity index 100% rename from librocksdb-sys/rocksdb/db/db_iter.h rename to db/db_iter.h diff --git a/librocksdb-sys/rocksdb/db/db_iter_stress_test.cc b/db/db_iter_stress_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_iter_stress_test.cc rename to db/db_iter_stress_test.cc diff --git 
a/librocksdb-sys/rocksdb/db/db_iter_test.cc b/db/db_iter_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_iter_test.cc rename to db/db_iter_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_iterator_test.cc b/db/db_iterator_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_iterator_test.cc rename to db/db_iterator_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_kv_checksum_test.cc b/db/db_kv_checksum_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_kv_checksum_test.cc rename to db/db_kv_checksum_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_log_iter_test.cc b/db/db_log_iter_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_log_iter_test.cc rename to db/db_log_iter_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_logical_block_size_cache_test.cc b/db/db_logical_block_size_cache_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_logical_block_size_cache_test.cc rename to db/db_logical_block_size_cache_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_memtable_test.cc b/db/db_memtable_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_memtable_test.cc rename to db/db_memtable_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_merge_operand_test.cc b/db/db_merge_operand_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_merge_operand_test.cc rename to db/db_merge_operand_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_merge_operator_test.cc b/db/db_merge_operator_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_merge_operator_test.cc rename to db/db_merge_operator_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_options_test.cc b/db/db_options_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_options_test.cc rename to db/db_options_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_properties_test.cc b/db/db_properties_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_properties_test.cc rename to db/db_properties_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_range_del_test.cc b/db/db_range_del_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_range_del_test.cc rename to db/db_range_del_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_rate_limiter_test.cc b/db/db_rate_limiter_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_rate_limiter_test.cc rename to db/db_rate_limiter_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_readonly_with_timestamp_test.cc b/db/db_readonly_with_timestamp_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_readonly_with_timestamp_test.cc rename to db/db_readonly_with_timestamp_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_secondary_test.cc b/db/db_secondary_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_secondary_test.cc rename to db/db_secondary_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_sst_test.cc b/db/db_sst_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_sst_test.cc rename to db/db_sst_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_statistics_test.cc b/db/db_statistics_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_statistics_test.cc rename to db/db_statistics_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_table_properties_test.cc b/db/db_table_properties_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_table_properties_test.cc rename to db/db_table_properties_test.cc diff 
--git a/librocksdb-sys/rocksdb/db/db_tailing_iter_test.cc b/db/db_tailing_iter_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_tailing_iter_test.cc rename to db/db_tailing_iter_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_test.cc b/db/db_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_test.cc rename to db/db_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_test2.cc b/db/db_test2.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_test2.cc rename to db/db_test2.cc diff --git a/librocksdb-sys/rocksdb/db/db_test_util.cc b/db/db_test_util.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_test_util.cc rename to db/db_test_util.cc diff --git a/librocksdb-sys/rocksdb/db/db_test_util.h b/db/db_test_util.h similarity index 100% rename from librocksdb-sys/rocksdb/db/db_test_util.h rename to db/db_test_util.h diff --git a/librocksdb-sys/rocksdb/db/db_universal_compaction_test.cc b/db/db_universal_compaction_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_universal_compaction_test.cc rename to db/db_universal_compaction_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_wal_test.cc b/db/db_wal_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_wal_test.cc rename to db/db_wal_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_with_timestamp_basic_test.cc b/db/db_with_timestamp_basic_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_with_timestamp_basic_test.cc rename to db/db_with_timestamp_basic_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_with_timestamp_compaction_test.cc b/db/db_with_timestamp_compaction_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_with_timestamp_compaction_test.cc rename to db/db_with_timestamp_compaction_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_with_timestamp_test_util.cc b/db/db_with_timestamp_test_util.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_with_timestamp_test_util.cc rename to db/db_with_timestamp_test_util.cc diff --git a/librocksdb-sys/rocksdb/db/db_with_timestamp_test_util.h b/db/db_with_timestamp_test_util.h similarity index 100% rename from librocksdb-sys/rocksdb/db/db_with_timestamp_test_util.h rename to db/db_with_timestamp_test_util.h diff --git a/librocksdb-sys/rocksdb/db/db_write_buffer_manager_test.cc b/db/db_write_buffer_manager_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_write_buffer_manager_test.cc rename to db/db_write_buffer_manager_test.cc diff --git a/librocksdb-sys/rocksdb/db/db_write_test.cc b/db/db_write_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/db_write_test.cc rename to db/db_write_test.cc diff --git a/librocksdb-sys/rocksdb/db/dbformat.cc b/db/dbformat.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/dbformat.cc rename to db/dbformat.cc diff --git a/librocksdb-sys/rocksdb/db/dbformat.h b/db/dbformat.h similarity index 100% rename from librocksdb-sys/rocksdb/db/dbformat.h rename to db/dbformat.h diff --git a/librocksdb-sys/rocksdb/db/dbformat_test.cc b/db/dbformat_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/dbformat_test.cc rename to db/dbformat_test.cc diff --git a/librocksdb-sys/rocksdb/db/deletefile_test.cc b/db/deletefile_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/deletefile_test.cc rename to db/deletefile_test.cc diff --git a/librocksdb-sys/rocksdb/db/error_handler.cc b/db/error_handler.cc similarity index 100% rename from 
librocksdb-sys/rocksdb/db/error_handler.cc rename to db/error_handler.cc diff --git a/librocksdb-sys/rocksdb/db/error_handler.h b/db/error_handler.h similarity index 100% rename from librocksdb-sys/rocksdb/db/error_handler.h rename to db/error_handler.h diff --git a/librocksdb-sys/rocksdb/db/error_handler_fs_test.cc b/db/error_handler_fs_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/error_handler_fs_test.cc rename to db/error_handler_fs_test.cc diff --git a/librocksdb-sys/rocksdb/db/event_helpers.cc b/db/event_helpers.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/event_helpers.cc rename to db/event_helpers.cc diff --git a/librocksdb-sys/rocksdb/db/event_helpers.h b/db/event_helpers.h similarity index 100% rename from librocksdb-sys/rocksdb/db/event_helpers.h rename to db/event_helpers.h diff --git a/librocksdb-sys/rocksdb/db/experimental.cc b/db/experimental.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/experimental.cc rename to db/experimental.cc diff --git a/librocksdb-sys/rocksdb/db/external_sst_file_basic_test.cc b/db/external_sst_file_basic_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/external_sst_file_basic_test.cc rename to db/external_sst_file_basic_test.cc diff --git a/librocksdb-sys/rocksdb/db/external_sst_file_ingestion_job.cc b/db/external_sst_file_ingestion_job.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/external_sst_file_ingestion_job.cc rename to db/external_sst_file_ingestion_job.cc diff --git a/librocksdb-sys/rocksdb/db/external_sst_file_ingestion_job.h b/db/external_sst_file_ingestion_job.h similarity index 100% rename from librocksdb-sys/rocksdb/db/external_sst_file_ingestion_job.h rename to db/external_sst_file_ingestion_job.h diff --git a/librocksdb-sys/rocksdb/db/external_sst_file_test.cc b/db/external_sst_file_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/external_sst_file_test.cc rename to db/external_sst_file_test.cc diff --git a/librocksdb-sys/rocksdb/db/fault_injection_test.cc b/db/fault_injection_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/fault_injection_test.cc rename to db/fault_injection_test.cc diff --git a/librocksdb-sys/rocksdb/db/file_indexer.cc b/db/file_indexer.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/file_indexer.cc rename to db/file_indexer.cc diff --git a/librocksdb-sys/rocksdb/db/file_indexer.h b/db/file_indexer.h similarity index 100% rename from librocksdb-sys/rocksdb/db/file_indexer.h rename to db/file_indexer.h diff --git a/librocksdb-sys/rocksdb/db/file_indexer_test.cc b/db/file_indexer_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/file_indexer_test.cc rename to db/file_indexer_test.cc diff --git a/librocksdb-sys/rocksdb/db/filename_test.cc b/db/filename_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/filename_test.cc rename to db/filename_test.cc diff --git a/librocksdb-sys/rocksdb/db/flush_job.cc b/db/flush_job.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/flush_job.cc rename to db/flush_job.cc diff --git a/librocksdb-sys/rocksdb/db/flush_job.h b/db/flush_job.h similarity index 100% rename from librocksdb-sys/rocksdb/db/flush_job.h rename to db/flush_job.h diff --git a/librocksdb-sys/rocksdb/db/flush_job_test.cc b/db/flush_job_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/flush_job_test.cc rename to db/flush_job_test.cc diff --git a/librocksdb-sys/rocksdb/db/flush_scheduler.cc 
b/db/flush_scheduler.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/flush_scheduler.cc rename to db/flush_scheduler.cc diff --git a/librocksdb-sys/rocksdb/db/flush_scheduler.h b/db/flush_scheduler.h similarity index 100% rename from librocksdb-sys/rocksdb/db/flush_scheduler.h rename to db/flush_scheduler.h diff --git a/librocksdb-sys/rocksdb/db/forward_iterator.cc b/db/forward_iterator.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/forward_iterator.cc rename to db/forward_iterator.cc diff --git a/librocksdb-sys/rocksdb/db/forward_iterator.h b/db/forward_iterator.h similarity index 100% rename from librocksdb-sys/rocksdb/db/forward_iterator.h rename to db/forward_iterator.h diff --git a/librocksdb-sys/rocksdb/db/forward_iterator_bench.cc b/db/forward_iterator_bench.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/forward_iterator_bench.cc rename to db/forward_iterator_bench.cc diff --git a/librocksdb-sys/rocksdb/db/history_trimming_iterator.h b/db/history_trimming_iterator.h similarity index 100% rename from librocksdb-sys/rocksdb/db/history_trimming_iterator.h rename to db/history_trimming_iterator.h diff --git a/librocksdb-sys/rocksdb/db/import_column_family_job.cc b/db/import_column_family_job.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/import_column_family_job.cc rename to db/import_column_family_job.cc diff --git a/librocksdb-sys/rocksdb/db/import_column_family_job.h b/db/import_column_family_job.h similarity index 100% rename from librocksdb-sys/rocksdb/db/import_column_family_job.h rename to db/import_column_family_job.h diff --git a/librocksdb-sys/rocksdb/db/import_column_family_test.cc b/db/import_column_family_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/import_column_family_test.cc rename to db/import_column_family_test.cc diff --git a/librocksdb-sys/rocksdb/db/internal_stats.cc b/db/internal_stats.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/internal_stats.cc rename to db/internal_stats.cc diff --git a/librocksdb-sys/rocksdb/db/internal_stats.h b/db/internal_stats.h similarity index 100% rename from librocksdb-sys/rocksdb/db/internal_stats.h rename to db/internal_stats.h diff --git a/librocksdb-sys/rocksdb/db/job_context.h b/db/job_context.h similarity index 100% rename from librocksdb-sys/rocksdb/db/job_context.h rename to db/job_context.h diff --git a/librocksdb-sys/rocksdb/db/kv_checksum.h b/db/kv_checksum.h similarity index 100% rename from librocksdb-sys/rocksdb/db/kv_checksum.h rename to db/kv_checksum.h diff --git a/librocksdb-sys/rocksdb/db/listener_test.cc b/db/listener_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/listener_test.cc rename to db/listener_test.cc diff --git a/librocksdb-sys/rocksdb/db/log_format.h b/db/log_format.h similarity index 100% rename from librocksdb-sys/rocksdb/db/log_format.h rename to db/log_format.h diff --git a/librocksdb-sys/rocksdb/db/log_reader.cc b/db/log_reader.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/log_reader.cc rename to db/log_reader.cc diff --git a/librocksdb-sys/rocksdb/db/log_reader.h b/db/log_reader.h similarity index 100% rename from librocksdb-sys/rocksdb/db/log_reader.h rename to db/log_reader.h diff --git a/librocksdb-sys/rocksdb/db/log_test.cc b/db/log_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/log_test.cc rename to db/log_test.cc diff --git a/librocksdb-sys/rocksdb/db/log_writer.cc b/db/log_writer.cc similarity index 100% rename from 
librocksdb-sys/rocksdb/db/log_writer.cc rename to db/log_writer.cc diff --git a/librocksdb-sys/rocksdb/db/log_writer.h b/db/log_writer.h similarity index 100% rename from librocksdb-sys/rocksdb/db/log_writer.h rename to db/log_writer.h diff --git a/librocksdb-sys/rocksdb/db/logs_with_prep_tracker.cc b/db/logs_with_prep_tracker.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/logs_with_prep_tracker.cc rename to db/logs_with_prep_tracker.cc diff --git a/librocksdb-sys/rocksdb/db/logs_with_prep_tracker.h b/db/logs_with_prep_tracker.h similarity index 100% rename from librocksdb-sys/rocksdb/db/logs_with_prep_tracker.h rename to db/logs_with_prep_tracker.h diff --git a/librocksdb-sys/rocksdb/db/lookup_key.h b/db/lookup_key.h similarity index 100% rename from librocksdb-sys/rocksdb/db/lookup_key.h rename to db/lookup_key.h diff --git a/librocksdb-sys/rocksdb/db/malloc_stats.cc b/db/malloc_stats.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/malloc_stats.cc rename to db/malloc_stats.cc diff --git a/librocksdb-sys/rocksdb/db/malloc_stats.h b/db/malloc_stats.h similarity index 100% rename from librocksdb-sys/rocksdb/db/malloc_stats.h rename to db/malloc_stats.h diff --git a/librocksdb-sys/rocksdb/db/manual_compaction_test.cc b/db/manual_compaction_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/manual_compaction_test.cc rename to db/manual_compaction_test.cc diff --git a/librocksdb-sys/rocksdb/db/memtable.cc b/db/memtable.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/memtable.cc rename to db/memtable.cc diff --git a/librocksdb-sys/rocksdb/db/memtable.h b/db/memtable.h similarity index 100% rename from librocksdb-sys/rocksdb/db/memtable.h rename to db/memtable.h diff --git a/librocksdb-sys/rocksdb/db/memtable_list.cc b/db/memtable_list.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/memtable_list.cc rename to db/memtable_list.cc diff --git a/librocksdb-sys/rocksdb/db/memtable_list.h b/db/memtable_list.h similarity index 100% rename from librocksdb-sys/rocksdb/db/memtable_list.h rename to db/memtable_list.h diff --git a/librocksdb-sys/rocksdb/db/memtable_list_test.cc b/db/memtable_list_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/memtable_list_test.cc rename to db/memtable_list_test.cc diff --git a/librocksdb-sys/rocksdb/db/merge_context.h b/db/merge_context.h similarity index 100% rename from librocksdb-sys/rocksdb/db/merge_context.h rename to db/merge_context.h diff --git a/librocksdb-sys/rocksdb/db/merge_helper.cc b/db/merge_helper.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/merge_helper.cc rename to db/merge_helper.cc diff --git a/librocksdb-sys/rocksdb/db/merge_helper.h b/db/merge_helper.h similarity index 100% rename from librocksdb-sys/rocksdb/db/merge_helper.h rename to db/merge_helper.h diff --git a/librocksdb-sys/rocksdb/db/merge_helper_test.cc b/db/merge_helper_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/merge_helper_test.cc rename to db/merge_helper_test.cc diff --git a/librocksdb-sys/rocksdb/db/merge_operator.cc b/db/merge_operator.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/merge_operator.cc rename to db/merge_operator.cc diff --git a/librocksdb-sys/rocksdb/db/merge_test.cc b/db/merge_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/merge_test.cc rename to db/merge_test.cc diff --git a/librocksdb-sys/rocksdb/db/obsolete_files_test.cc b/db/obsolete_files_test.cc similarity index 100% rename from 
librocksdb-sys/rocksdb/db/obsolete_files_test.cc rename to db/obsolete_files_test.cc diff --git a/librocksdb-sys/rocksdb/db/options_file_test.cc b/db/options_file_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/options_file_test.cc rename to db/options_file_test.cc diff --git a/librocksdb-sys/rocksdb/db/output_validator.cc b/db/output_validator.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/output_validator.cc rename to db/output_validator.cc diff --git a/librocksdb-sys/rocksdb/db/output_validator.h b/db/output_validator.h similarity index 100% rename from librocksdb-sys/rocksdb/db/output_validator.h rename to db/output_validator.h diff --git a/librocksdb-sys/rocksdb/db/perf_context_test.cc b/db/perf_context_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/perf_context_test.cc rename to db/perf_context_test.cc diff --git a/librocksdb-sys/rocksdb/db/periodic_task_scheduler.cc b/db/periodic_task_scheduler.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/periodic_task_scheduler.cc rename to db/periodic_task_scheduler.cc diff --git a/librocksdb-sys/rocksdb/db/periodic_task_scheduler.h b/db/periodic_task_scheduler.h similarity index 100% rename from librocksdb-sys/rocksdb/db/periodic_task_scheduler.h rename to db/periodic_task_scheduler.h diff --git a/librocksdb-sys/rocksdb/db/periodic_task_scheduler_test.cc b/db/periodic_task_scheduler_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/periodic_task_scheduler_test.cc rename to db/periodic_task_scheduler_test.cc diff --git a/librocksdb-sys/rocksdb/db/pinned_iterators_manager.h b/db/pinned_iterators_manager.h similarity index 100% rename from librocksdb-sys/rocksdb/db/pinned_iterators_manager.h rename to db/pinned_iterators_manager.h diff --git a/librocksdb-sys/rocksdb/db/plain_table_db_test.cc b/db/plain_table_db_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/plain_table_db_test.cc rename to db/plain_table_db_test.cc diff --git a/librocksdb-sys/rocksdb/db/post_memtable_callback.h b/db/post_memtable_callback.h similarity index 100% rename from librocksdb-sys/rocksdb/db/post_memtable_callback.h rename to db/post_memtable_callback.h diff --git a/librocksdb-sys/rocksdb/db/pre_release_callback.h b/db/pre_release_callback.h similarity index 100% rename from librocksdb-sys/rocksdb/db/pre_release_callback.h rename to db/pre_release_callback.h diff --git a/librocksdb-sys/rocksdb/db/prefix_test.cc b/db/prefix_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/prefix_test.cc rename to db/prefix_test.cc diff --git a/librocksdb-sys/rocksdb/db/range_del_aggregator.cc b/db/range_del_aggregator.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/range_del_aggregator.cc rename to db/range_del_aggregator.cc diff --git a/librocksdb-sys/rocksdb/db/range_del_aggregator.h b/db/range_del_aggregator.h similarity index 100% rename from librocksdb-sys/rocksdb/db/range_del_aggregator.h rename to db/range_del_aggregator.h diff --git a/librocksdb-sys/rocksdb/db/range_del_aggregator_bench.cc b/db/range_del_aggregator_bench.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/range_del_aggregator_bench.cc rename to db/range_del_aggregator_bench.cc diff --git a/librocksdb-sys/rocksdb/db/range_del_aggregator_test.cc b/db/range_del_aggregator_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/range_del_aggregator_test.cc rename to db/range_del_aggregator_test.cc diff --git 
a/librocksdb-sys/rocksdb/db/range_tombstone_fragmenter.cc b/db/range_tombstone_fragmenter.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/range_tombstone_fragmenter.cc rename to db/range_tombstone_fragmenter.cc diff --git a/librocksdb-sys/rocksdb/db/range_tombstone_fragmenter.h b/db/range_tombstone_fragmenter.h similarity index 100% rename from librocksdb-sys/rocksdb/db/range_tombstone_fragmenter.h rename to db/range_tombstone_fragmenter.h diff --git a/librocksdb-sys/rocksdb/db/range_tombstone_fragmenter_test.cc b/db/range_tombstone_fragmenter_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/range_tombstone_fragmenter_test.cc rename to db/range_tombstone_fragmenter_test.cc diff --git a/librocksdb-sys/rocksdb/db/read_callback.h b/db/read_callback.h similarity index 100% rename from librocksdb-sys/rocksdb/db/read_callback.h rename to db/read_callback.h diff --git a/librocksdb-sys/rocksdb/db/repair.cc b/db/repair.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/repair.cc rename to db/repair.cc diff --git a/librocksdb-sys/rocksdb/db/repair_test.cc b/db/repair_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/repair_test.cc rename to db/repair_test.cc diff --git a/librocksdb-sys/rocksdb/db/seqno_time_test.cc b/db/seqno_time_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/seqno_time_test.cc rename to db/seqno_time_test.cc diff --git a/librocksdb-sys/rocksdb/db/seqno_to_time_mapping.cc b/db/seqno_to_time_mapping.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/seqno_to_time_mapping.cc rename to db/seqno_to_time_mapping.cc diff --git a/librocksdb-sys/rocksdb/db/seqno_to_time_mapping.h b/db/seqno_to_time_mapping.h similarity index 100% rename from librocksdb-sys/rocksdb/db/seqno_to_time_mapping.h rename to db/seqno_to_time_mapping.h diff --git a/librocksdb-sys/rocksdb/db/snapshot_checker.h b/db/snapshot_checker.h similarity index 100% rename from librocksdb-sys/rocksdb/db/snapshot_checker.h rename to db/snapshot_checker.h diff --git a/librocksdb-sys/rocksdb/db/snapshot_impl.cc b/db/snapshot_impl.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/snapshot_impl.cc rename to db/snapshot_impl.cc diff --git a/librocksdb-sys/rocksdb/db/snapshot_impl.h b/db/snapshot_impl.h similarity index 100% rename from librocksdb-sys/rocksdb/db/snapshot_impl.h rename to db/snapshot_impl.h diff --git a/librocksdb-sys/rocksdb/db/table_cache.cc b/db/table_cache.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/table_cache.cc rename to db/table_cache.cc diff --git a/librocksdb-sys/rocksdb/db/table_cache.h b/db/table_cache.h similarity index 100% rename from librocksdb-sys/rocksdb/db/table_cache.h rename to db/table_cache.h diff --git a/librocksdb-sys/rocksdb/db/table_cache_sync_and_async.h b/db/table_cache_sync_and_async.h similarity index 100% rename from librocksdb-sys/rocksdb/db/table_cache_sync_and_async.h rename to db/table_cache_sync_and_async.h diff --git a/librocksdb-sys/rocksdb/db/table_properties_collector.cc b/db/table_properties_collector.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/table_properties_collector.cc rename to db/table_properties_collector.cc diff --git a/librocksdb-sys/rocksdb/db/table_properties_collector.h b/db/table_properties_collector.h similarity index 100% rename from librocksdb-sys/rocksdb/db/table_properties_collector.h rename to db/table_properties_collector.h diff --git a/librocksdb-sys/rocksdb/db/table_properties_collector_test.cc 
b/db/table_properties_collector_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/table_properties_collector_test.cc rename to db/table_properties_collector_test.cc diff --git a/librocksdb-sys/rocksdb/db/transaction_log_impl.cc b/db/transaction_log_impl.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/transaction_log_impl.cc rename to db/transaction_log_impl.cc diff --git a/librocksdb-sys/rocksdb/db/transaction_log_impl.h b/db/transaction_log_impl.h similarity index 100% rename from librocksdb-sys/rocksdb/db/transaction_log_impl.h rename to db/transaction_log_impl.h diff --git a/librocksdb-sys/rocksdb/db/trim_history_scheduler.cc b/db/trim_history_scheduler.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/trim_history_scheduler.cc rename to db/trim_history_scheduler.cc diff --git a/librocksdb-sys/rocksdb/db/trim_history_scheduler.h b/db/trim_history_scheduler.h similarity index 100% rename from librocksdb-sys/rocksdb/db/trim_history_scheduler.h rename to db/trim_history_scheduler.h diff --git a/librocksdb-sys/rocksdb/db/version_builder.cc b/db/version_builder.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/version_builder.cc rename to db/version_builder.cc diff --git a/librocksdb-sys/rocksdb/db/version_builder.h b/db/version_builder.h similarity index 100% rename from librocksdb-sys/rocksdb/db/version_builder.h rename to db/version_builder.h diff --git a/librocksdb-sys/rocksdb/db/version_builder_test.cc b/db/version_builder_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/version_builder_test.cc rename to db/version_builder_test.cc diff --git a/librocksdb-sys/rocksdb/db/version_edit.cc b/db/version_edit.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/version_edit.cc rename to db/version_edit.cc diff --git a/librocksdb-sys/rocksdb/db/version_edit.h b/db/version_edit.h similarity index 100% rename from librocksdb-sys/rocksdb/db/version_edit.h rename to db/version_edit.h diff --git a/librocksdb-sys/rocksdb/db/version_edit_handler.cc b/db/version_edit_handler.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/version_edit_handler.cc rename to db/version_edit_handler.cc diff --git a/librocksdb-sys/rocksdb/db/version_edit_handler.h b/db/version_edit_handler.h similarity index 100% rename from librocksdb-sys/rocksdb/db/version_edit_handler.h rename to db/version_edit_handler.h diff --git a/librocksdb-sys/rocksdb/db/version_edit_test.cc b/db/version_edit_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/version_edit_test.cc rename to db/version_edit_test.cc diff --git a/librocksdb-sys/rocksdb/db/version_set.cc b/db/version_set.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/version_set.cc rename to db/version_set.cc diff --git a/librocksdb-sys/rocksdb/db/version_set.h b/db/version_set.h similarity index 100% rename from librocksdb-sys/rocksdb/db/version_set.h rename to db/version_set.h diff --git a/librocksdb-sys/rocksdb/db/version_set_sync_and_async.h b/db/version_set_sync_and_async.h similarity index 100% rename from librocksdb-sys/rocksdb/db/version_set_sync_and_async.h rename to db/version_set_sync_and_async.h diff --git a/librocksdb-sys/rocksdb/db/version_set_test.cc b/db/version_set_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/version_set_test.cc rename to db/version_set_test.cc diff --git a/librocksdb-sys/rocksdb/db/version_util.h b/db/version_util.h similarity index 100% rename from librocksdb-sys/rocksdb/db/version_util.h rename 
to db/version_util.h diff --git a/librocksdb-sys/rocksdb/db/wal_edit.cc b/db/wal_edit.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/wal_edit.cc rename to db/wal_edit.cc diff --git a/librocksdb-sys/rocksdb/db/wal_edit.h b/db/wal_edit.h similarity index 100% rename from librocksdb-sys/rocksdb/db/wal_edit.h rename to db/wal_edit.h diff --git a/librocksdb-sys/rocksdb/db/wal_edit_test.cc b/db/wal_edit_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/wal_edit_test.cc rename to db/wal_edit_test.cc diff --git a/librocksdb-sys/rocksdb/db/wal_manager.cc b/db/wal_manager.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/wal_manager.cc rename to db/wal_manager.cc diff --git a/librocksdb-sys/rocksdb/db/wal_manager.h b/db/wal_manager.h similarity index 100% rename from librocksdb-sys/rocksdb/db/wal_manager.h rename to db/wal_manager.h diff --git a/librocksdb-sys/rocksdb/db/wal_manager_test.cc b/db/wal_manager_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/wal_manager_test.cc rename to db/wal_manager_test.cc diff --git a/librocksdb-sys/rocksdb/db/wide/db_wide_basic_test.cc b/db/wide/db_wide_basic_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/wide/db_wide_basic_test.cc rename to db/wide/db_wide_basic_test.cc diff --git a/librocksdb-sys/rocksdb/db/wide/wide_column_serialization.cc b/db/wide/wide_column_serialization.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/wide/wide_column_serialization.cc rename to db/wide/wide_column_serialization.cc diff --git a/librocksdb-sys/rocksdb/db/wide/wide_column_serialization.h b/db/wide/wide_column_serialization.h similarity index 100% rename from librocksdb-sys/rocksdb/db/wide/wide_column_serialization.h rename to db/wide/wide_column_serialization.h diff --git a/librocksdb-sys/rocksdb/db/wide/wide_column_serialization_test.cc b/db/wide/wide_column_serialization_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/wide/wide_column_serialization_test.cc rename to db/wide/wide_column_serialization_test.cc diff --git a/librocksdb-sys/rocksdb/db/wide/wide_columns.cc b/db/wide/wide_columns.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/wide/wide_columns.cc rename to db/wide/wide_columns.cc diff --git a/librocksdb-sys/rocksdb/db/write_batch.cc b/db/write_batch.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/write_batch.cc rename to db/write_batch.cc diff --git a/librocksdb-sys/rocksdb/db/write_batch_base.cc b/db/write_batch_base.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/write_batch_base.cc rename to db/write_batch_base.cc diff --git a/librocksdb-sys/rocksdb/db/write_batch_internal.h b/db/write_batch_internal.h similarity index 100% rename from librocksdb-sys/rocksdb/db/write_batch_internal.h rename to db/write_batch_internal.h diff --git a/librocksdb-sys/rocksdb/db/write_batch_test.cc b/db/write_batch_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/write_batch_test.cc rename to db/write_batch_test.cc diff --git a/librocksdb-sys/rocksdb/db/write_callback.h b/db/write_callback.h similarity index 100% rename from librocksdb-sys/rocksdb/db/write_callback.h rename to db/write_callback.h diff --git a/librocksdb-sys/rocksdb/db/write_callback_test.cc b/db/write_callback_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/write_callback_test.cc rename to db/write_callback_test.cc diff --git a/librocksdb-sys/rocksdb/db/write_controller.cc b/db/write_controller.cc similarity index 
100% rename from librocksdb-sys/rocksdb/db/write_controller.cc rename to db/write_controller.cc diff --git a/librocksdb-sys/rocksdb/db/write_controller.h b/db/write_controller.h similarity index 100% rename from librocksdb-sys/rocksdb/db/write_controller.h rename to db/write_controller.h diff --git a/librocksdb-sys/rocksdb/db/write_controller_test.cc b/db/write_controller_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/write_controller_test.cc rename to db/write_controller_test.cc diff --git a/librocksdb-sys/rocksdb/db/write_stall_stats.cc b/db/write_stall_stats.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/write_stall_stats.cc rename to db/write_stall_stats.cc diff --git a/librocksdb-sys/rocksdb/db/write_stall_stats.h b/db/write_stall_stats.h similarity index 100% rename from librocksdb-sys/rocksdb/db/write_stall_stats.h rename to db/write_stall_stats.h diff --git a/librocksdb-sys/rocksdb/db/write_thread.cc b/db/write_thread.cc similarity index 100% rename from librocksdb-sys/rocksdb/db/write_thread.cc rename to db/write_thread.cc diff --git a/librocksdb-sys/rocksdb/db/write_thread.h b/db/write_thread.h similarity index 100% rename from librocksdb-sys/rocksdb/db/write_thread.h rename to db/write_thread.h diff --git a/librocksdb-sys/rocksdb/db_stress_tool/CMakeLists.txt b/db_stress_tool/CMakeLists.txt similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/CMakeLists.txt rename to db_stress_tool/CMakeLists.txt diff --git a/librocksdb-sys/rocksdb/db_stress_tool/batched_ops_stress.cc b/db_stress_tool/batched_ops_stress.cc similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/batched_ops_stress.cc rename to db_stress_tool/batched_ops_stress.cc diff --git a/librocksdb-sys/rocksdb/db_stress_tool/cf_consistency_stress.cc b/db_stress_tool/cf_consistency_stress.cc similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/cf_consistency_stress.cc rename to db_stress_tool/cf_consistency_stress.cc diff --git a/librocksdb-sys/rocksdb/db_stress_tool/db_stress.cc b/db_stress_tool/db_stress.cc similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/db_stress.cc rename to db_stress_tool/db_stress.cc diff --git a/librocksdb-sys/rocksdb/db_stress_tool/db_stress_common.cc b/db_stress_tool/db_stress_common.cc similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/db_stress_common.cc rename to db_stress_tool/db_stress_common.cc diff --git a/librocksdb-sys/rocksdb/db_stress_tool/db_stress_common.h b/db_stress_tool/db_stress_common.h similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/db_stress_common.h rename to db_stress_tool/db_stress_common.h diff --git a/librocksdb-sys/rocksdb/db_stress_tool/db_stress_compaction_filter.h b/db_stress_tool/db_stress_compaction_filter.h similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/db_stress_compaction_filter.h rename to db_stress_tool/db_stress_compaction_filter.h diff --git a/librocksdb-sys/rocksdb/db_stress_tool/db_stress_driver.cc b/db_stress_tool/db_stress_driver.cc similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/db_stress_driver.cc rename to db_stress_tool/db_stress_driver.cc diff --git a/librocksdb-sys/rocksdb/db_stress_tool/db_stress_driver.h b/db_stress_tool/db_stress_driver.h similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/db_stress_driver.h rename to db_stress_tool/db_stress_driver.h diff --git a/librocksdb-sys/rocksdb/db_stress_tool/db_stress_env_wrapper.h 
b/db_stress_tool/db_stress_env_wrapper.h similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/db_stress_env_wrapper.h rename to db_stress_tool/db_stress_env_wrapper.h diff --git a/librocksdb-sys/rocksdb/db_stress_tool/db_stress_gflags.cc b/db_stress_tool/db_stress_gflags.cc similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/db_stress_gflags.cc rename to db_stress_tool/db_stress_gflags.cc diff --git a/librocksdb-sys/rocksdb/db_stress_tool/db_stress_listener.cc b/db_stress_tool/db_stress_listener.cc similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/db_stress_listener.cc rename to db_stress_tool/db_stress_listener.cc diff --git a/librocksdb-sys/rocksdb/db_stress_tool/db_stress_listener.h b/db_stress_tool/db_stress_listener.h similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/db_stress_listener.h rename to db_stress_tool/db_stress_listener.h diff --git a/librocksdb-sys/rocksdb/db_stress_tool/db_stress_shared_state.cc b/db_stress_tool/db_stress_shared_state.cc similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/db_stress_shared_state.cc rename to db_stress_tool/db_stress_shared_state.cc diff --git a/librocksdb-sys/rocksdb/db_stress_tool/db_stress_shared_state.h b/db_stress_tool/db_stress_shared_state.h similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/db_stress_shared_state.h rename to db_stress_tool/db_stress_shared_state.h diff --git a/librocksdb-sys/rocksdb/db_stress_tool/db_stress_stat.cc b/db_stress_tool/db_stress_stat.cc similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/db_stress_stat.cc rename to db_stress_tool/db_stress_stat.cc diff --git a/librocksdb-sys/rocksdb/db_stress_tool/db_stress_stat.h b/db_stress_tool/db_stress_stat.h similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/db_stress_stat.h rename to db_stress_tool/db_stress_stat.h diff --git a/librocksdb-sys/rocksdb/db_stress_tool/db_stress_table_properties_collector.h b/db_stress_tool/db_stress_table_properties_collector.h similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/db_stress_table_properties_collector.h rename to db_stress_tool/db_stress_table_properties_collector.h diff --git a/librocksdb-sys/rocksdb/db_stress_tool/db_stress_test_base.cc b/db_stress_tool/db_stress_test_base.cc similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/db_stress_test_base.cc rename to db_stress_tool/db_stress_test_base.cc diff --git a/librocksdb-sys/rocksdb/db_stress_tool/db_stress_test_base.h b/db_stress_tool/db_stress_test_base.h similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/db_stress_test_base.h rename to db_stress_tool/db_stress_test_base.h diff --git a/librocksdb-sys/rocksdb/db_stress_tool/db_stress_tool.cc b/db_stress_tool/db_stress_tool.cc similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/db_stress_tool.cc rename to db_stress_tool/db_stress_tool.cc diff --git a/librocksdb-sys/rocksdb/db_stress_tool/expected_state.cc b/db_stress_tool/expected_state.cc similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/expected_state.cc rename to db_stress_tool/expected_state.cc diff --git a/librocksdb-sys/rocksdb/db_stress_tool/expected_state.h b/db_stress_tool/expected_state.h similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/expected_state.h rename to db_stress_tool/expected_state.h diff --git a/librocksdb-sys/rocksdb/db_stress_tool/expected_value.cc 
b/db_stress_tool/expected_value.cc similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/expected_value.cc rename to db_stress_tool/expected_value.cc diff --git a/librocksdb-sys/rocksdb/db_stress_tool/expected_value.h b/db_stress_tool/expected_value.h similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/expected_value.h rename to db_stress_tool/expected_value.h diff --git a/librocksdb-sys/rocksdb/db_stress_tool/multi_ops_txns_stress.cc b/db_stress_tool/multi_ops_txns_stress.cc similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/multi_ops_txns_stress.cc rename to db_stress_tool/multi_ops_txns_stress.cc diff --git a/librocksdb-sys/rocksdb/db_stress_tool/multi_ops_txns_stress.h b/db_stress_tool/multi_ops_txns_stress.h similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/multi_ops_txns_stress.h rename to db_stress_tool/multi_ops_txns_stress.h diff --git a/librocksdb-sys/rocksdb/db_stress_tool/no_batched_ops_stress.cc b/db_stress_tool/no_batched_ops_stress.cc similarity index 100% rename from librocksdb-sys/rocksdb/db_stress_tool/no_batched_ops_stress.cc rename to db_stress_tool/no_batched_ops_stress.cc diff --git a/librocksdb-sys/rocksdb/docs/.gitignore b/docs/.gitignore similarity index 100% rename from librocksdb-sys/rocksdb/docs/.gitignore rename to docs/.gitignore diff --git a/librocksdb-sys/rocksdb/docs/CNAME b/docs/CNAME similarity index 100% rename from librocksdb-sys/rocksdb/docs/CNAME rename to docs/CNAME diff --git a/librocksdb-sys/rocksdb/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md similarity index 100% rename from librocksdb-sys/rocksdb/docs/CONTRIBUTING.md rename to docs/CONTRIBUTING.md diff --git a/librocksdb-sys/rocksdb/docs/Gemfile b/docs/Gemfile similarity index 100% rename from librocksdb-sys/rocksdb/docs/Gemfile rename to docs/Gemfile diff --git a/librocksdb-sys/rocksdb/docs/LICENSE-DOCUMENTATION b/docs/LICENSE-DOCUMENTATION similarity index 100% rename from librocksdb-sys/rocksdb/docs/LICENSE-DOCUMENTATION rename to docs/LICENSE-DOCUMENTATION diff --git a/librocksdb-sys/rocksdb/docs/README.md b/docs/README.md similarity index 100% rename from librocksdb-sys/rocksdb/docs/README.md rename to docs/README.md diff --git a/librocksdb-sys/rocksdb/docs/TEMPLATE-INFORMATION.md b/docs/TEMPLATE-INFORMATION.md similarity index 100% rename from librocksdb-sys/rocksdb/docs/TEMPLATE-INFORMATION.md rename to docs/TEMPLATE-INFORMATION.md diff --git a/librocksdb-sys/rocksdb/docs/_config.yml b/docs/_config.yml similarity index 100% rename from librocksdb-sys/rocksdb/docs/_config.yml rename to docs/_config.yml diff --git a/librocksdb-sys/rocksdb/docs/_data/authors.yml b/docs/_data/authors.yml similarity index 100% rename from librocksdb-sys/rocksdb/docs/_data/authors.yml rename to docs/_data/authors.yml diff --git a/librocksdb-sys/rocksdb/docs/_data/features.yml b/docs/_data/features.yml similarity index 100% rename from librocksdb-sys/rocksdb/docs/_data/features.yml rename to docs/_data/features.yml diff --git a/librocksdb-sys/rocksdb/docs/_data/nav.yml b/docs/_data/nav.yml similarity index 100% rename from librocksdb-sys/rocksdb/docs/_data/nav.yml rename to docs/_data/nav.yml diff --git a/librocksdb-sys/rocksdb/docs/_data/nav_docs.yml b/docs/_data/nav_docs.yml similarity index 100% rename from librocksdb-sys/rocksdb/docs/_data/nav_docs.yml rename to docs/_data/nav_docs.yml diff --git a/librocksdb-sys/rocksdb/docs/_data/powered_by.yml b/docs/_data/powered_by.yml similarity index 100% rename from 
librocksdb-sys/rocksdb/docs/_data/powered_by.yml rename to docs/_data/powered_by.yml diff --git a/librocksdb-sys/rocksdb/docs/_data/powered_by_highlight.yml b/docs/_data/powered_by_highlight.yml similarity index 100% rename from librocksdb-sys/rocksdb/docs/_data/powered_by_highlight.yml rename to docs/_data/powered_by_highlight.yml diff --git a/librocksdb-sys/rocksdb/docs/_data/promo.yml b/docs/_data/promo.yml similarity index 100% rename from librocksdb-sys/rocksdb/docs/_data/promo.yml rename to docs/_data/promo.yml diff --git a/librocksdb-sys/rocksdb/docs/_docs/faq.md b/docs/_docs/faq.md similarity index 100% rename from librocksdb-sys/rocksdb/docs/_docs/faq.md rename to docs/_docs/faq.md diff --git a/librocksdb-sys/rocksdb/docs/_docs/getting-started.md b/docs/_docs/getting-started.md similarity index 100% rename from librocksdb-sys/rocksdb/docs/_docs/getting-started.md rename to docs/_docs/getting-started.md diff --git a/librocksdb-sys/rocksdb/docs/_includes/blog_pagination.html b/docs/_includes/blog_pagination.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/blog_pagination.html rename to docs/_includes/blog_pagination.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/content/gridblocks.html b/docs/_includes/content/gridblocks.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/content/gridblocks.html rename to docs/_includes/content/gridblocks.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/content/items/gridblock.html b/docs/_includes/content/items/gridblock.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/content/items/gridblock.html rename to docs/_includes/content/items/gridblock.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/doc.html b/docs/_includes/doc.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/doc.html rename to docs/_includes/doc.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/doc_paging.html b/docs/_includes/doc_paging.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/doc_paging.html rename to docs/_includes/doc_paging.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/footer.html b/docs/_includes/footer.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/footer.html rename to docs/_includes/footer.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/head.html b/docs/_includes/head.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/head.html rename to docs/_includes/head.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/header.html b/docs/_includes/header.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/header.html rename to docs/_includes/header.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/hero.html b/docs/_includes/hero.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/hero.html rename to docs/_includes/hero.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/home_header.html b/docs/_includes/home_header.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/home_header.html rename to docs/_includes/home_header.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/katex_import.html b/docs/_includes/katex_import.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/katex_import.html rename to docs/_includes/katex_import.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/katex_render.html 
b/docs/_includes/katex_render.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/katex_render.html rename to docs/_includes/katex_render.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/nav.html b/docs/_includes/nav.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/nav.html rename to docs/_includes/nav.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/nav/collection_nav.html b/docs/_includes/nav/collection_nav.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/nav/collection_nav.html rename to docs/_includes/nav/collection_nav.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/nav/collection_nav_group.html b/docs/_includes/nav/collection_nav_group.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/nav/collection_nav_group.html rename to docs/_includes/nav/collection_nav_group.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/nav/collection_nav_group_item.html b/docs/_includes/nav/collection_nav_group_item.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/nav/collection_nav_group_item.html rename to docs/_includes/nav/collection_nav_group_item.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/nav/header_nav.html b/docs/_includes/nav/header_nav.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/nav/header_nav.html rename to docs/_includes/nav/header_nav.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/nav_search.html b/docs/_includes/nav_search.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/nav_search.html rename to docs/_includes/nav_search.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/plugins/all_share.html b/docs/_includes/plugins/all_share.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/plugins/all_share.html rename to docs/_includes/plugins/all_share.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/plugins/ascii_cinema.html b/docs/_includes/plugins/ascii_cinema.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/plugins/ascii_cinema.html rename to docs/_includes/plugins/ascii_cinema.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/plugins/button.html b/docs/_includes/plugins/button.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/plugins/button.html rename to docs/_includes/plugins/button.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/plugins/github_star.html b/docs/_includes/plugins/github_star.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/plugins/github_star.html rename to docs/_includes/plugins/github_star.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/plugins/github_watch.html b/docs/_includes/plugins/github_watch.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/plugins/github_watch.html rename to docs/_includes/plugins/github_watch.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/plugins/google_share.html b/docs/_includes/plugins/google_share.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/plugins/google_share.html rename to docs/_includes/plugins/google_share.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/plugins/iframe.html b/docs/_includes/plugins/iframe.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/plugins/iframe.html rename to docs/_includes/plugins/iframe.html diff --git 
a/librocksdb-sys/rocksdb/docs/_includes/plugins/like_button.html b/docs/_includes/plugins/like_button.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/plugins/like_button.html rename to docs/_includes/plugins/like_button.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/plugins/plugin_row.html b/docs/_includes/plugins/plugin_row.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/plugins/plugin_row.html rename to docs/_includes/plugins/plugin_row.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/plugins/post_social_plugins.html b/docs/_includes/plugins/post_social_plugins.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/plugins/post_social_plugins.html rename to docs/_includes/plugins/post_social_plugins.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/plugins/slideshow.html b/docs/_includes/plugins/slideshow.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/plugins/slideshow.html rename to docs/_includes/plugins/slideshow.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/plugins/twitter_follow.html b/docs/_includes/plugins/twitter_follow.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/plugins/twitter_follow.html rename to docs/_includes/plugins/twitter_follow.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/plugins/twitter_share.html b/docs/_includes/plugins/twitter_share.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/plugins/twitter_share.html rename to docs/_includes/plugins/twitter_share.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/post.html b/docs/_includes/post.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/post.html rename to docs/_includes/post.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/powered_by.html b/docs/_includes/powered_by.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/powered_by.html rename to docs/_includes/powered_by.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/social_plugins.html b/docs/_includes/social_plugins.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/social_plugins.html rename to docs/_includes/social_plugins.html diff --git a/librocksdb-sys/rocksdb/docs/_includes/ui/button.html b/docs/_includes/ui/button.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_includes/ui/button.html rename to docs/_includes/ui/button.html diff --git a/librocksdb-sys/rocksdb/docs/_layouts/basic.html b/docs/_layouts/basic.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_layouts/basic.html rename to docs/_layouts/basic.html diff --git a/librocksdb-sys/rocksdb/docs/_layouts/blog.html b/docs/_layouts/blog.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_layouts/blog.html rename to docs/_layouts/blog.html diff --git a/librocksdb-sys/rocksdb/docs/_layouts/blog_default.html b/docs/_layouts/blog_default.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_layouts/blog_default.html rename to docs/_layouts/blog_default.html diff --git a/librocksdb-sys/rocksdb/docs/_layouts/default.html b/docs/_layouts/default.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_layouts/default.html rename to docs/_layouts/default.html diff --git a/librocksdb-sys/rocksdb/docs/_layouts/doc_default.html b/docs/_layouts/doc_default.html similarity index 100% rename from 
librocksdb-sys/rocksdb/docs/_layouts/doc_default.html rename to docs/_layouts/doc_default.html diff --git a/librocksdb-sys/rocksdb/docs/_layouts/doc_page.html b/docs/_layouts/doc_page.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_layouts/doc_page.html rename to docs/_layouts/doc_page.html diff --git a/librocksdb-sys/rocksdb/docs/_layouts/docs.html b/docs/_layouts/docs.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_layouts/docs.html rename to docs/_layouts/docs.html diff --git a/librocksdb-sys/rocksdb/docs/_layouts/home.html b/docs/_layouts/home.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_layouts/home.html rename to docs/_layouts/home.html diff --git a/librocksdb-sys/rocksdb/docs/_layouts/page.html b/docs/_layouts/page.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_layouts/page.html rename to docs/_layouts/page.html diff --git a/librocksdb-sys/rocksdb/docs/_layouts/plain.html b/docs/_layouts/plain.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_layouts/plain.html rename to docs/_layouts/plain.html diff --git a/librocksdb-sys/rocksdb/docs/_layouts/post.html b/docs/_layouts/post.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_layouts/post.html rename to docs/_layouts/post.html diff --git a/librocksdb-sys/rocksdb/docs/_layouts/redirect.html b/docs/_layouts/redirect.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_layouts/redirect.html rename to docs/_layouts/redirect.html diff --git a/librocksdb-sys/rocksdb/docs/_layouts/top-level.html b/docs/_layouts/top-level.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/_layouts/top-level.html rename to docs/_layouts/top-level.html diff --git a/librocksdb-sys/rocksdb/docs/_posts/2014-03-27-how-to-backup-rocksdb.markdown b/docs/_posts/2014-03-27-how-to-backup-rocksdb.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2014-03-27-how-to-backup-rocksdb.markdown rename to docs/_posts/2014-03-27-how-to-backup-rocksdb.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2014-03-27-how-to-persist-in-memory-rocksdb-database.markdown b/docs/_posts/2014-03-27-how-to-persist-in-memory-rocksdb-database.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2014-03-27-how-to-persist-in-memory-rocksdb-database.markdown rename to docs/_posts/2014-03-27-how-to-persist-in-memory-rocksdb-database.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2014-04-02-the-1st-rocksdb-local-meetup-held-on-march-27-2014.markdown b/docs/_posts/2014-04-02-the-1st-rocksdb-local-meetup-held-on-march-27-2014.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2014-04-02-the-1st-rocksdb-local-meetup-held-on-march-27-2014.markdown rename to docs/_posts/2014-04-02-the-1st-rocksdb-local-meetup-held-on-march-27-2014.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2014-04-07-rocksdb-2-8-release.markdown b/docs/_posts/2014-04-07-rocksdb-2-8-release.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2014-04-07-rocksdb-2-8-release.markdown rename to docs/_posts/2014-04-07-rocksdb-2-8-release.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2014-04-21-indexing-sst-files-for-better-lookup-performance.markdown b/docs/_posts/2014-04-21-indexing-sst-files-for-better-lookup-performance.markdown similarity index 100% rename from 
librocksdb-sys/rocksdb/docs/_posts/2014-04-21-indexing-sst-files-for-better-lookup-performance.markdown rename to docs/_posts/2014-04-21-indexing-sst-files-for-better-lookup-performance.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2014-05-14-lock.markdown b/docs/_posts/2014-05-14-lock.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2014-05-14-lock.markdown rename to docs/_posts/2014-05-14-lock.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2014-05-19-rocksdb-3-0-release.markdown b/docs/_posts/2014-05-19-rocksdb-3-0-release.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2014-05-19-rocksdb-3-0-release.markdown rename to docs/_posts/2014-05-19-rocksdb-3-0-release.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2014-05-22-rocksdb-3-1-release.markdown b/docs/_posts/2014-05-22-rocksdb-3-1-release.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2014-05-22-rocksdb-3-1-release.markdown rename to docs/_posts/2014-05-22-rocksdb-3-1-release.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2014-06-23-plaintable-a-new-file-format.markdown b/docs/_posts/2014-06-23-plaintable-a-new-file-format.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2014-06-23-plaintable-a-new-file-format.markdown rename to docs/_posts/2014-06-23-plaintable-a-new-file-format.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2014-06-27-avoid-expensive-locks-in-get.markdown b/docs/_posts/2014-06-27-avoid-expensive-locks-in-get.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2014-06-27-avoid-expensive-locks-in-get.markdown rename to docs/_posts/2014-06-27-avoid-expensive-locks-in-get.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2014-06-27-rocksdb-3-2-release.markdown b/docs/_posts/2014-06-27-rocksdb-3-2-release.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2014-06-27-rocksdb-3-2-release.markdown rename to docs/_posts/2014-06-27-rocksdb-3-2-release.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2014-07-29-rocksdb-3-3-release.markdown b/docs/_posts/2014-07-29-rocksdb-3-3-release.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2014-07-29-rocksdb-3-3-release.markdown rename to docs/_posts/2014-07-29-rocksdb-3-3-release.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2014-09-12-cuckoo.markdown b/docs/_posts/2014-09-12-cuckoo.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2014-09-12-cuckoo.markdown rename to docs/_posts/2014-09-12-cuckoo.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2014-09-12-new-bloom-filter-format.markdown b/docs/_posts/2014-09-12-new-bloom-filter-format.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2014-09-12-new-bloom-filter-format.markdown rename to docs/_posts/2014-09-12-new-bloom-filter-format.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2014-09-15-rocksdb-3-5-release.markdown b/docs/_posts/2014-09-15-rocksdb-3-5-release.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2014-09-15-rocksdb-3-5-release.markdown rename to docs/_posts/2014-09-15-rocksdb-3-5-release.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2015-01-16-migrating-from-leveldb-to-rocksdb-2.markdown b/docs/_posts/2015-01-16-migrating-from-leveldb-to-rocksdb-2.markdown similarity index 100% rename from 
librocksdb-sys/rocksdb/docs/_posts/2015-01-16-migrating-from-leveldb-to-rocksdb-2.markdown rename to docs/_posts/2015-01-16-migrating-from-leveldb-to-rocksdb-2.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2015-02-24-reading-rocksdb-options-from-a-file.markdown b/docs/_posts/2015-02-24-reading-rocksdb-options-from-a-file.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2015-02-24-reading-rocksdb-options-from-a-file.markdown rename to docs/_posts/2015-02-24-reading-rocksdb-options-from-a-file.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2015-02-27-write-batch-with-index.markdown b/docs/_posts/2015-02-27-write-batch-with-index.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2015-02-27-write-batch-with-index.markdown rename to docs/_posts/2015-02-27-write-batch-with-index.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2015-04-22-integrating-rocksdb-with-mongodb-2.markdown b/docs/_posts/2015-04-22-integrating-rocksdb-with-mongodb-2.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2015-04-22-integrating-rocksdb-with-mongodb-2.markdown rename to docs/_posts/2015-04-22-integrating-rocksdb-with-mongodb-2.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2015-06-12-rocksdb-in-osquery.markdown b/docs/_posts/2015-06-12-rocksdb-in-osquery.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2015-06-12-rocksdb-in-osquery.markdown rename to docs/_posts/2015-06-12-rocksdb-in-osquery.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2015-07-15-rocksdb-2015-h2-roadmap.markdown b/docs/_posts/2015-07-15-rocksdb-2015-h2-roadmap.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2015-07-15-rocksdb-2015-h2-roadmap.markdown rename to docs/_posts/2015-07-15-rocksdb-2015-h2-roadmap.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2015-07-17-spatial-indexing-in-rocksdb.markdown b/docs/_posts/2015-07-17-spatial-indexing-in-rocksdb.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2015-07-17-spatial-indexing-in-rocksdb.markdown rename to docs/_posts/2015-07-17-spatial-indexing-in-rocksdb.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2015-07-22-rocksdb-is-now-available-in-windows-platform.markdown b/docs/_posts/2015-07-22-rocksdb-is-now-available-in-windows-platform.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2015-07-22-rocksdb-is-now-available-in-windows-platform.markdown rename to docs/_posts/2015-07-22-rocksdb-is-now-available-in-windows-platform.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2015-07-23-dynamic-level.markdown b/docs/_posts/2015-07-23-dynamic-level.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2015-07-23-dynamic-level.markdown rename to docs/_posts/2015-07-23-dynamic-level.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2015-10-27-getthreadlist.markdown b/docs/_posts/2015-10-27-getthreadlist.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2015-10-27-getthreadlist.markdown rename to docs/_posts/2015-10-27-getthreadlist.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2015-11-10-use-checkpoints-for-efficient-snapshots.markdown b/docs/_posts/2015-11-10-use-checkpoints-for-efficient-snapshots.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2015-11-10-use-checkpoints-for-efficient-snapshots.markdown rename to 
docs/_posts/2015-11-10-use-checkpoints-for-efficient-snapshots.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2015-11-16-analysis-file-read-latency-by-level.markdown b/docs/_posts/2015-11-16-analysis-file-read-latency-by-level.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2015-11-16-analysis-file-read-latency-by-level.markdown rename to docs/_posts/2015-11-16-analysis-file-read-latency-by-level.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2016-01-29-compaction_pri.markdown b/docs/_posts/2016-01-29-compaction_pri.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2016-01-29-compaction_pri.markdown rename to docs/_posts/2016-01-29-compaction_pri.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2016-02-24-rocksdb-4-2-release.markdown b/docs/_posts/2016-02-24-rocksdb-4-2-release.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2016-02-24-rocksdb-4-2-release.markdown rename to docs/_posts/2016-02-24-rocksdb-4-2-release.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2016-02-25-rocksdb-ama.markdown b/docs/_posts/2016-02-25-rocksdb-ama.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2016-02-25-rocksdb-ama.markdown rename to docs/_posts/2016-02-25-rocksdb-ama.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2016-03-07-rocksdb-options-file.markdown b/docs/_posts/2016-03-07-rocksdb-options-file.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2016-03-07-rocksdb-options-file.markdown rename to docs/_posts/2016-03-07-rocksdb-options-file.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2016-04-26-rocksdb-4-5-1-released.markdown b/docs/_posts/2016-04-26-rocksdb-4-5-1-released.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2016-04-26-rocksdb-4-5-1-released.markdown rename to docs/_posts/2016-04-26-rocksdb-4-5-1-released.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2016-07-26-rocksdb-4-8-released.markdown b/docs/_posts/2016-07-26-rocksdb-4-8-released.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2016-07-26-rocksdb-4-8-released.markdown rename to docs/_posts/2016-07-26-rocksdb-4-8-released.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2016-09-28-rocksdb-4-11-2-released.markdown b/docs/_posts/2016-09-28-rocksdb-4-11-2-released.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2016-09-28-rocksdb-4-11-2-released.markdown rename to docs/_posts/2016-09-28-rocksdb-4-11-2-released.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2017-01-06-rocksdb-5-0-1-released.markdown b/docs/_posts/2017-01-06-rocksdb-5-0-1-released.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2017-01-06-rocksdb-5-0-1-released.markdown rename to docs/_posts/2017-01-06-rocksdb-5-0-1-released.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2017-02-07-rocksdb-5-1-2-released.markdown b/docs/_posts/2017-02-07-rocksdb-5-1-2-released.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2017-02-07-rocksdb-5-1-2-released.markdown rename to docs/_posts/2017-02-07-rocksdb-5-1-2-released.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2017-02-17-bulkoad-ingest-sst-file.markdown b/docs/_posts/2017-02-17-bulkoad-ingest-sst-file.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2017-02-17-bulkoad-ingest-sst-file.markdown rename to 
docs/_posts/2017-02-17-bulkoad-ingest-sst-file.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2017-03-02-rocksdb-5-2-1-released.markdown b/docs/_posts/2017-03-02-rocksdb-5-2-1-released.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2017-03-02-rocksdb-5-2-1-released.markdown rename to docs/_posts/2017-03-02-rocksdb-5-2-1-released.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2017-05-12-partitioned-index-filter.markdown b/docs/_posts/2017-05-12-partitioned-index-filter.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2017-05-12-partitioned-index-filter.markdown rename to docs/_posts/2017-05-12-partitioned-index-filter.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2017-05-14-core-local-stats.markdown b/docs/_posts/2017-05-14-core-local-stats.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2017-05-14-core-local-stats.markdown rename to docs/_posts/2017-05-14-core-local-stats.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2017-05-26-rocksdb-5-4-5-released.markdown b/docs/_posts/2017-05-26-rocksdb-5-4-5-released.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2017-05-26-rocksdb-5-4-5-released.markdown rename to docs/_posts/2017-05-26-rocksdb-5-4-5-released.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2017-06-26-17-level-based-changes.markdown b/docs/_posts/2017-06-26-17-level-based-changes.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2017-06-26-17-level-based-changes.markdown rename to docs/_posts/2017-06-26-17-level-based-changes.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2017-06-29-rocksdb-5-5-1-released.markdown b/docs/_posts/2017-06-29-rocksdb-5-5-1-released.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2017-06-29-rocksdb-5-5-1-released.markdown rename to docs/_posts/2017-06-29-rocksdb-5-5-1-released.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2017-07-25-rocksdb-5-6-1-released.markdown b/docs/_posts/2017-07-25-rocksdb-5-6-1-released.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2017-07-25-rocksdb-5-6-1-released.markdown rename to docs/_posts/2017-07-25-rocksdb-5-6-1-released.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2017-08-24-pinnableslice.markdown b/docs/_posts/2017-08-24-pinnableslice.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2017-08-24-pinnableslice.markdown rename to docs/_posts/2017-08-24-pinnableslice.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2017-08-25-flushwal.markdown b/docs/_posts/2017-08-25-flushwal.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2017-08-25-flushwal.markdown rename to docs/_posts/2017-08-25-flushwal.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2017-09-28-rocksdb-5-8-released.markdown b/docs/_posts/2017-09-28-rocksdb-5-8-released.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2017-09-28-rocksdb-5-8-released.markdown rename to docs/_posts/2017-09-28-rocksdb-5-8-released.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2017-12-18-17-auto-tuned-rate-limiter.markdown b/docs/_posts/2017-12-18-17-auto-tuned-rate-limiter.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2017-12-18-17-auto-tuned-rate-limiter.markdown rename to docs/_posts/2017-12-18-17-auto-tuned-rate-limiter.markdown diff --git 
a/librocksdb-sys/rocksdb/docs/_posts/2017-12-19-write-prepared-txn.markdown b/docs/_posts/2017-12-19-write-prepared-txn.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2017-12-19-write-prepared-txn.markdown rename to docs/_posts/2017-12-19-write-prepared-txn.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2018-02-05-rocksdb-5-10-2-released.markdown b/docs/_posts/2018-02-05-rocksdb-5-10-2-released.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2018-02-05-rocksdb-5-10-2-released.markdown rename to docs/_posts/2018-02-05-rocksdb-5-10-2-released.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2018-08-01-rocksdb-tuning-advisor.markdown b/docs/_posts/2018-08-01-rocksdb-tuning-advisor.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2018-08-01-rocksdb-tuning-advisor.markdown rename to docs/_posts/2018-08-01-rocksdb-tuning-advisor.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2018-08-23-data-block-hash-index.markdown b/docs/_posts/2018-08-23-data-block-hash-index.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2018-08-23-data-block-hash-index.markdown rename to docs/_posts/2018-08-23-data-block-hash-index.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2018-11-21-delete-range.markdown b/docs/_posts/2018-11-21-delete-range.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2018-11-21-delete-range.markdown rename to docs/_posts/2018-11-21-delete-range.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2019-03-08-format-version-4.markdown b/docs/_posts/2019-03-08-format-version-4.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2019-03-08-format-version-4.markdown rename to docs/_posts/2019-03-08-format-version-4.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2019-08-15-unordered-write.markdown b/docs/_posts/2019-08-15-unordered-write.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2019-08-15-unordered-write.markdown rename to docs/_posts/2019-08-15-unordered-write.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2021-04-12-universal-improvements.markdown b/docs/_posts/2021-04-12-universal-improvements.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2021-04-12-universal-improvements.markdown rename to docs/_posts/2021-04-12-universal-improvements.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2021-05-26-integrated-blob-db.markdown b/docs/_posts/2021-05-26-integrated-blob-db.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2021-05-26-integrated-blob-db.markdown rename to docs/_posts/2021-05-26-integrated-blob-db.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2021-05-26-online-validation.markdown b/docs/_posts/2021-05-26-online-validation.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2021-05-26-online-validation.markdown rename to docs/_posts/2021-05-26-online-validation.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2021-05-27-rocksdb-secondary-cache.markdown b/docs/_posts/2021-05-27-rocksdb-secondary-cache.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2021-05-27-rocksdb-secondary-cache.markdown rename to docs/_posts/2021-05-27-rocksdb-secondary-cache.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2021-05-31-dictionary-compression.markdown 
b/docs/_posts/2021-05-31-dictionary-compression.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2021-05-31-dictionary-compression.markdown rename to docs/_posts/2021-05-31-dictionary-compression.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2021-12-29-ribbon-filter.markdown b/docs/_posts/2021-12-29-ribbon-filter.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2021-12-29-ribbon-filter.markdown rename to docs/_posts/2021-12-29-ribbon-filter.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2022-07-18-per-key-value-checksum.markdown b/docs/_posts/2022-07-18-per-key-value-checksum.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2022-07-18-per-key-value-checksum.markdown rename to docs/_posts/2022-07-18-per-key-value-checksum.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2022-10-05-lost-buffered-write-recovery.markdown b/docs/_posts/2022-10-05-lost-buffered-write-recovery.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2022-10-05-lost-buffered-write-recovery.markdown rename to docs/_posts/2022-10-05-lost-buffered-write-recovery.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2022-10-07-asynchronous-io-in-rocksdb.markdown b/docs/_posts/2022-10-07-asynchronous-io-in-rocksdb.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2022-10-07-asynchronous-io-in-rocksdb.markdown rename to docs/_posts/2022-10-07-asynchronous-io-in-rocksdb.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2022-10-31-align-compaction-output-file.markdown b/docs/_posts/2022-10-31-align-compaction-output-file.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2022-10-31-align-compaction-output-file.markdown rename to docs/_posts/2022-10-31-align-compaction-output-file.markdown diff --git a/librocksdb-sys/rocksdb/docs/_posts/2022-11-09-time-aware-tiered-storage.markdown b/docs/_posts/2022-11-09-time-aware-tiered-storage.markdown similarity index 100% rename from librocksdb-sys/rocksdb/docs/_posts/2022-11-09-time-aware-tiered-storage.markdown rename to docs/_posts/2022-11-09-time-aware-tiered-storage.markdown diff --git a/librocksdb-sys/rocksdb/docs/_sass/_base.scss b/docs/_sass/_base.scss similarity index 100% rename from librocksdb-sys/rocksdb/docs/_sass/_base.scss rename to docs/_sass/_base.scss diff --git a/librocksdb-sys/rocksdb/docs/_sass/_blog.scss b/docs/_sass/_blog.scss similarity index 100% rename from librocksdb-sys/rocksdb/docs/_sass/_blog.scss rename to docs/_sass/_blog.scss diff --git a/librocksdb-sys/rocksdb/docs/_sass/_buttons.scss b/docs/_sass/_buttons.scss similarity index 100% rename from librocksdb-sys/rocksdb/docs/_sass/_buttons.scss rename to docs/_sass/_buttons.scss diff --git a/librocksdb-sys/rocksdb/docs/_sass/_footer.scss b/docs/_sass/_footer.scss similarity index 100% rename from librocksdb-sys/rocksdb/docs/_sass/_footer.scss rename to docs/_sass/_footer.scss diff --git a/librocksdb-sys/rocksdb/docs/_sass/_gridBlock.scss b/docs/_sass/_gridBlock.scss similarity index 100% rename from librocksdb-sys/rocksdb/docs/_sass/_gridBlock.scss rename to docs/_sass/_gridBlock.scss diff --git a/librocksdb-sys/rocksdb/docs/_sass/_header.scss b/docs/_sass/_header.scss similarity index 100% rename from librocksdb-sys/rocksdb/docs/_sass/_header.scss rename to docs/_sass/_header.scss diff --git a/librocksdb-sys/rocksdb/docs/_sass/_poweredby.scss b/docs/_sass/_poweredby.scss similarity index 100% rename 
from librocksdb-sys/rocksdb/docs/_sass/_poweredby.scss rename to docs/_sass/_poweredby.scss diff --git a/librocksdb-sys/rocksdb/docs/_sass/_promo.scss b/docs/_sass/_promo.scss similarity index 100% rename from librocksdb-sys/rocksdb/docs/_sass/_promo.scss rename to docs/_sass/_promo.scss diff --git a/librocksdb-sys/rocksdb/docs/_sass/_react_docs_nav.scss b/docs/_sass/_react_docs_nav.scss similarity index 100% rename from librocksdb-sys/rocksdb/docs/_sass/_react_docs_nav.scss rename to docs/_sass/_react_docs_nav.scss diff --git a/librocksdb-sys/rocksdb/docs/_sass/_react_header_nav.scss b/docs/_sass/_react_header_nav.scss similarity index 100% rename from librocksdb-sys/rocksdb/docs/_sass/_react_header_nav.scss rename to docs/_sass/_react_header_nav.scss diff --git a/librocksdb-sys/rocksdb/docs/_sass/_reset.scss b/docs/_sass/_reset.scss similarity index 100% rename from librocksdb-sys/rocksdb/docs/_sass/_reset.scss rename to docs/_sass/_reset.scss diff --git a/librocksdb-sys/rocksdb/docs/_sass/_search.scss b/docs/_sass/_search.scss similarity index 100% rename from librocksdb-sys/rocksdb/docs/_sass/_search.scss rename to docs/_sass/_search.scss diff --git a/librocksdb-sys/rocksdb/docs/_sass/_slideshow.scss b/docs/_sass/_slideshow.scss similarity index 100% rename from librocksdb-sys/rocksdb/docs/_sass/_slideshow.scss rename to docs/_sass/_slideshow.scss diff --git a/librocksdb-sys/rocksdb/docs/_sass/_syntax-highlighting.scss b/docs/_sass/_syntax-highlighting.scss similarity index 100% rename from librocksdb-sys/rocksdb/docs/_sass/_syntax-highlighting.scss rename to docs/_sass/_syntax-highlighting.scss diff --git a/librocksdb-sys/rocksdb/docs/_sass/_tables.scss b/docs/_sass/_tables.scss similarity index 100% rename from librocksdb-sys/rocksdb/docs/_sass/_tables.scss rename to docs/_sass/_tables.scss diff --git a/librocksdb-sys/rocksdb/docs/_top-level/support.md b/docs/_top-level/support.md similarity index 100% rename from librocksdb-sys/rocksdb/docs/_top-level/support.md rename to docs/_top-level/support.md diff --git a/librocksdb-sys/rocksdb/docs/blog/all.html b/docs/blog/all.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/blog/all.html rename to docs/blog/all.html diff --git a/librocksdb-sys/rocksdb/docs/blog/index.html b/docs/blog/index.html similarity index 100% rename from librocksdb-sys/rocksdb/docs/blog/index.html rename to docs/blog/index.html diff --git a/librocksdb-sys/rocksdb/docs/css/main.scss b/docs/css/main.scss similarity index 100% rename from librocksdb-sys/rocksdb/docs/css/main.scss rename to docs/css/main.scss diff --git a/librocksdb-sys/rocksdb/docs/doc-type-examples/2016-04-07-blog-post-example.md b/docs/doc-type-examples/2016-04-07-blog-post-example.md similarity index 100% rename from librocksdb-sys/rocksdb/docs/doc-type-examples/2016-04-07-blog-post-example.md rename to docs/doc-type-examples/2016-04-07-blog-post-example.md diff --git a/librocksdb-sys/rocksdb/docs/doc-type-examples/docs-hello-world.md b/docs/doc-type-examples/docs-hello-world.md similarity index 100% rename from librocksdb-sys/rocksdb/docs/doc-type-examples/docs-hello-world.md rename to docs/doc-type-examples/docs-hello-world.md diff --git a/librocksdb-sys/rocksdb/docs/doc-type-examples/top-level-example.md b/docs/doc-type-examples/top-level-example.md similarity index 100% rename from librocksdb-sys/rocksdb/docs/doc-type-examples/top-level-example.md rename to docs/doc-type-examples/top-level-example.md diff --git a/librocksdb-sys/rocksdb/docs/docs/index.html b/docs/docs/index.html 
similarity index 100% rename from librocksdb-sys/rocksdb/docs/docs/index.html rename to docs/docs/index.html diff --git a/librocksdb-sys/rocksdb/docs/feed.xml b/docs/feed.xml similarity index 100% rename from librocksdb-sys/rocksdb/docs/feed.xml rename to docs/feed.xml diff --git a/librocksdb-sys/rocksdb/docs/index.md b/docs/index.md similarity index 100% rename from librocksdb-sys/rocksdb/docs/index.md rename to docs/index.md diff --git a/librocksdb-sys/rocksdb/docs/static/favicon.png b/docs/static/favicon.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/favicon.png rename to docs/static/favicon.png diff --git a/librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-Black.woff b/docs/static/fonts/LatoLatin-Black.woff similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-Black.woff rename to docs/static/fonts/LatoLatin-Black.woff diff --git a/librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-Black.woff2 b/docs/static/fonts/LatoLatin-Black.woff2 similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-Black.woff2 rename to docs/static/fonts/LatoLatin-Black.woff2 diff --git a/librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-BlackItalic.woff b/docs/static/fonts/LatoLatin-BlackItalic.woff similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-BlackItalic.woff rename to docs/static/fonts/LatoLatin-BlackItalic.woff diff --git a/librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-BlackItalic.woff2 b/docs/static/fonts/LatoLatin-BlackItalic.woff2 similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-BlackItalic.woff2 rename to docs/static/fonts/LatoLatin-BlackItalic.woff2 diff --git a/librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-Italic.woff b/docs/static/fonts/LatoLatin-Italic.woff similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-Italic.woff rename to docs/static/fonts/LatoLatin-Italic.woff diff --git a/librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-Italic.woff2 b/docs/static/fonts/LatoLatin-Italic.woff2 similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-Italic.woff2 rename to docs/static/fonts/LatoLatin-Italic.woff2 diff --git a/librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-Light.woff b/docs/static/fonts/LatoLatin-Light.woff similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-Light.woff rename to docs/static/fonts/LatoLatin-Light.woff diff --git a/librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-Light.woff2 b/docs/static/fonts/LatoLatin-Light.woff2 similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-Light.woff2 rename to docs/static/fonts/LatoLatin-Light.woff2 diff --git a/librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-Regular.woff b/docs/static/fonts/LatoLatin-Regular.woff similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-Regular.woff rename to docs/static/fonts/LatoLatin-Regular.woff diff --git a/librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-Regular.woff2 b/docs/static/fonts/LatoLatin-Regular.woff2 similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/fonts/LatoLatin-Regular.woff2 rename to docs/static/fonts/LatoLatin-Regular.woff2 diff --git a/librocksdb-sys/rocksdb/docs/static/images/Resize-of-20140327_200754-300x225.jpg b/docs/static/images/Resize-of-20140327_200754-300x225.jpg similarity index 100% rename from 
librocksdb-sys/rocksdb/docs/static/images/Resize-of-20140327_200754-300x225.jpg rename to docs/static/images/Resize-of-20140327_200754-300x225.jpg diff --git a/librocksdb-sys/rocksdb/docs/static/images/align-compaction-output/compaction_output_file_size_compare.png b/docs/static/images/align-compaction-output/compaction_output_file_size_compare.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/align-compaction-output/compaction_output_file_size_compare.png rename to docs/static/images/align-compaction-output/compaction_output_file_size_compare.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/align-compaction-output/file_cut_align.png b/docs/static/images/align-compaction-output/file_cut_align.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/align-compaction-output/file_cut_align.png rename to docs/static/images/align-compaction-output/file_cut_align.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/align-compaction-output/file_cut_normal.png b/docs/static/images/align-compaction-output/file_cut_normal.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/align-compaction-output/file_cut_normal.png rename to docs/static/images/align-compaction-output/file_cut_normal.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/align-compaction-output/file_cut_trival_move.png b/docs/static/images/align-compaction-output/file_cut_trival_move.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/align-compaction-output/file_cut_trival_move.png rename to docs/static/images/align-compaction-output/file_cut_trival_move.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/align-compaction-output/file_size_compare.png b/docs/static/images/align-compaction-output/file_size_compare.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/align-compaction-output/file_size_compare.png rename to docs/static/images/align-compaction-output/file_size_compare.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/align-compaction-output/write_amp_compare.png b/docs/static/images/align-compaction-output/write_amp_compare.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/align-compaction-output/write_amp_compare.png rename to docs/static/images/align-compaction-output/write_amp_compare.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/asynchronous-io/mget_async.png b/docs/static/images/asynchronous-io/mget_async.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/asynchronous-io/mget_async.png rename to docs/static/images/asynchronous-io/mget_async.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/asynchronous-io/scan_async.png b/docs/static/images/asynchronous-io/scan_async.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/asynchronous-io/scan_async.png rename to docs/static/images/asynchronous-io/scan_async.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/binaryseek.png b/docs/static/images/binaryseek.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/binaryseek.png rename to docs/static/images/binaryseek.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/bloom_fp_vs_bpk.png b/docs/static/images/bloom_fp_vs_bpk.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/bloom_fp_vs_bpk.png rename to docs/static/images/bloom_fp_vs_bpk.png diff --git 
a/librocksdb-sys/rocksdb/docs/static/images/compaction/full-range.png b/docs/static/images/compaction/full-range.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/compaction/full-range.png rename to docs/static/images/compaction/full-range.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/compaction/l0-l1-contend.png b/docs/static/images/compaction/l0-l1-contend.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/compaction/l0-l1-contend.png rename to docs/static/images/compaction/l0-l1-contend.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/compaction/l1-l2-contend.png b/docs/static/images/compaction/l1-l2-contend.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/compaction/l1-l2-contend.png rename to docs/static/images/compaction/l1-l2-contend.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/compaction/part-range-old.png b/docs/static/images/compaction/part-range-old.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/compaction/part-range-old.png rename to docs/static/images/compaction/part-range-old.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/data-block-hash-index/block-format-binary-seek.png b/docs/static/images/data-block-hash-index/block-format-binary-seek.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/data-block-hash-index/block-format-binary-seek.png rename to docs/static/images/data-block-hash-index/block-format-binary-seek.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/data-block-hash-index/block-format-hash-index.png b/docs/static/images/data-block-hash-index/block-format-hash-index.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/data-block-hash-index/block-format-hash-index.png rename to docs/static/images/data-block-hash-index/block-format-hash-index.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/data-block-hash-index/hash-index-data-structure.png b/docs/static/images/data-block-hash-index/hash-index-data-structure.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/data-block-hash-index/hash-index-data-structure.png rename to docs/static/images/data-block-hash-index/hash-index-data-structure.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/data-block-hash-index/perf-cache-miss.png b/docs/static/images/data-block-hash-index/perf-cache-miss.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/data-block-hash-index/perf-cache-miss.png rename to docs/static/images/data-block-hash-index/perf-cache-miss.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/data-block-hash-index/perf-throughput.png b/docs/static/images/data-block-hash-index/perf-throughput.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/data-block-hash-index/perf-throughput.png rename to docs/static/images/data-block-hash-index/perf-throughput.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/delrange/delrange_collapsed.png b/docs/static/images/delrange/delrange_collapsed.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/delrange/delrange_collapsed.png rename to docs/static/images/delrange/delrange_collapsed.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/delrange/delrange_key_schema.png b/docs/static/images/delrange/delrange_key_schema.png similarity index 100% rename from 
librocksdb-sys/rocksdb/docs/static/images/delrange/delrange_key_schema.png rename to docs/static/images/delrange/delrange_key_schema.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/delrange/delrange_sst_blocks.png b/docs/static/images/delrange/delrange_sst_blocks.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/delrange/delrange_sst_blocks.png rename to docs/static/images/delrange/delrange_sst_blocks.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/delrange/delrange_uncollapsed.png b/docs/static/images/delrange/delrange_uncollapsed.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/delrange/delrange_uncollapsed.png rename to docs/static/images/delrange/delrange_uncollapsed.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/delrange/delrange_write_path.png b/docs/static/images/delrange/delrange_write_path.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/delrange/delrange_write_path.png rename to docs/static/images/delrange/delrange_write_path.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/dictcmp/dictcmp_raw_sampled.png b/docs/static/images/dictcmp/dictcmp_raw_sampled.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/dictcmp/dictcmp_raw_sampled.png rename to docs/static/images/dictcmp/dictcmp_raw_sampled.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/dictcmp/dictcmp_sst_blocks.png b/docs/static/images/dictcmp/dictcmp_sst_blocks.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/dictcmp/dictcmp_sst_blocks.png rename to docs/static/images/dictcmp/dictcmp_sst_blocks.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/dictcmp/dictcmp_zstd_trained.png b/docs/static/images/dictcmp/dictcmp_zstd_trained.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/dictcmp/dictcmp_zstd_trained.png rename to docs/static/images/dictcmp/dictcmp_zstd_trained.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/integrated-blob-db/BlobDB_Benchmarks_Legacy_Vs_Integrated.png b/docs/static/images/integrated-blob-db/BlobDB_Benchmarks_Legacy_Vs_Integrated.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/integrated-blob-db/BlobDB_Benchmarks_Legacy_Vs_Integrated.png rename to docs/static/images/integrated-blob-db/BlobDB_Benchmarks_Legacy_Vs_Integrated.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/integrated-blob-db/BlobDB_Benchmarks_RW_RO_Perf.png b/docs/static/images/integrated-blob-db/BlobDB_Benchmarks_RW_RO_Perf.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/integrated-blob-db/BlobDB_Benchmarks_RW_RO_Perf.png rename to docs/static/images/integrated-blob-db/BlobDB_Benchmarks_RW_RO_Perf.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/integrated-blob-db/BlobDB_Benchmarks_Write_Amp.png b/docs/static/images/integrated-blob-db/BlobDB_Benchmarks_Write_Amp.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/integrated-blob-db/BlobDB_Benchmarks_Write_Amp.png rename to docs/static/images/integrated-blob-db/BlobDB_Benchmarks_Write_Amp.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/integrated-blob-db/BlobDB_Benchmarks_Write_Perf.png b/docs/static/images/integrated-blob-db/BlobDB_Benchmarks_Write_Perf.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/integrated-blob-db/BlobDB_Benchmarks_Write_Perf.png rename to 
docs/static/images/integrated-blob-db/BlobDB_Benchmarks_Write_Perf.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/kv-checksum/Memtable-entry.png b/docs/static/images/kv-checksum/Memtable-entry.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/kv-checksum/Memtable-entry.png rename to docs/static/images/kv-checksum/Memtable-entry.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/kv-checksum/Memtable-write.png b/docs/static/images/kv-checksum/Memtable-write.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/kv-checksum/Memtable-write.png rename to docs/static/images/kv-checksum/Memtable-write.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/kv-checksum/ProtInfo-Memtable.png b/docs/static/images/kv-checksum/ProtInfo-Memtable.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/kv-checksum/ProtInfo-Memtable.png rename to docs/static/images/kv-checksum/ProtInfo-Memtable.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/kv-checksum/ProtInfo-Writebatch-to-Memtable.png b/docs/static/images/kv-checksum/ProtInfo-Writebatch-to-Memtable.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/kv-checksum/ProtInfo-Writebatch-to-Memtable.png rename to docs/static/images/kv-checksum/ProtInfo-Writebatch-to-Memtable.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/kv-checksum/ProtInfo-Writebatch.png b/docs/static/images/kv-checksum/ProtInfo-Writebatch.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/kv-checksum/ProtInfo-Writebatch.png rename to docs/static/images/kv-checksum/ProtInfo-Writebatch.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/kv-checksum/WAL-fragment.png b/docs/static/images/kv-checksum/WAL-fragment.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/kv-checksum/WAL-fragment.png rename to docs/static/images/kv-checksum/WAL-fragment.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/kv-checksum/WAL-read.png b/docs/static/images/kv-checksum/WAL-read.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/kv-checksum/WAL-read.png rename to docs/static/images/kv-checksum/WAL-read.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/kv-checksum/WAL-write.png b/docs/static/images/kv-checksum/WAL-write.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/kv-checksum/WAL-write.png rename to docs/static/images/kv-checksum/WAL-write.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/kv-checksum/Write-batch.png b/docs/static/images/kv-checksum/Write-batch.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/kv-checksum/Write-batch.png rename to docs/static/images/kv-checksum/Write-batch.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/kv-checksum/Writebatch-write.png b/docs/static/images/kv-checksum/Writebatch-write.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/kv-checksum/Writebatch-write.png rename to docs/static/images/kv-checksum/Writebatch-write.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/lost-buffered-write-recovery/angry-cat.png b/docs/static/images/lost-buffered-write-recovery/angry-cat.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/lost-buffered-write-recovery/angry-cat.png rename to docs/static/images/lost-buffered-write-recovery/angry-cat.png diff --git 
a/librocksdb-sys/rocksdb/docs/static/images/lost-buffered-write-recovery/basic-setup.png b/docs/static/images/lost-buffered-write-recovery/basic-setup.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/lost-buffered-write-recovery/basic-setup.png rename to docs/static/images/lost-buffered-write-recovery/basic-setup.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/lost-buffered-write-recovery/happy-cat.png b/docs/static/images/lost-buffered-write-recovery/happy-cat.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/lost-buffered-write-recovery/happy-cat.png rename to docs/static/images/lost-buffered-write-recovery/happy-cat.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/lost-buffered-write-recovery/replay-extension.png b/docs/static/images/lost-buffered-write-recovery/replay-extension.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/lost-buffered-write-recovery/replay-extension.png rename to docs/static/images/lost-buffered-write-recovery/replay-extension.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/lost-buffered-write-recovery/test-fs-writable-file.png b/docs/static/images/lost-buffered-write-recovery/test-fs-writable-file.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/lost-buffered-write-recovery/test-fs-writable-file.png rename to docs/static/images/lost-buffered-write-recovery/test-fs-writable-file.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/lost-buffered-write-recovery/trace-extension.png b/docs/static/images/lost-buffered-write-recovery/trace-extension.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/lost-buffered-write-recovery/trace-extension.png rename to docs/static/images/lost-buffered-write-recovery/trace-extension.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/pcache-blockindex.jpg b/docs/static/images/pcache-blockindex.jpg similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/pcache-blockindex.jpg rename to docs/static/images/pcache-blockindex.jpg diff --git a/librocksdb-sys/rocksdb/docs/static/images/pcache-fileindex.jpg b/docs/static/images/pcache-fileindex.jpg similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/pcache-fileindex.jpg rename to docs/static/images/pcache-fileindex.jpg diff --git a/librocksdb-sys/rocksdb/docs/static/images/pcache-filelayout.jpg b/docs/static/images/pcache-filelayout.jpg similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/pcache-filelayout.jpg rename to docs/static/images/pcache-filelayout.jpg diff --git a/librocksdb-sys/rocksdb/docs/static/images/pcache-readiopath.jpg b/docs/static/images/pcache-readiopath.jpg similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/pcache-readiopath.jpg rename to docs/static/images/pcache-readiopath.jpg diff --git a/librocksdb-sys/rocksdb/docs/static/images/pcache-tieredstorage.jpg b/docs/static/images/pcache-tieredstorage.jpg similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/pcache-tieredstorage.jpg rename to docs/static/images/pcache-tieredstorage.jpg diff --git a/librocksdb-sys/rocksdb/docs/static/images/pcache-writeiopath.jpg b/docs/static/images/pcache-writeiopath.jpg similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/pcache-writeiopath.jpg rename to docs/static/images/pcache-writeiopath.jpg diff --git 
a/librocksdb-sys/rocksdb/docs/static/images/promo-adapt.svg b/docs/static/images/promo-adapt.svg similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/promo-adapt.svg rename to docs/static/images/promo-adapt.svg diff --git a/librocksdb-sys/rocksdb/docs/static/images/promo-flash.svg b/docs/static/images/promo-flash.svg similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/promo-flash.svg rename to docs/static/images/promo-flash.svg diff --git a/librocksdb-sys/rocksdb/docs/static/images/promo-operations.svg b/docs/static/images/promo-operations.svg similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/promo-operations.svg rename to docs/static/images/promo-operations.svg diff --git a/librocksdb-sys/rocksdb/docs/static/images/promo-performance.svg b/docs/static/images/promo-performance.svg similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/promo-performance.svg rename to docs/static/images/promo-performance.svg diff --git a/librocksdb-sys/rocksdb/docs/static/images/rate-limiter/auto-tuned-write-KBps-series.png b/docs/static/images/rate-limiter/auto-tuned-write-KBps-series.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/rate-limiter/auto-tuned-write-KBps-series.png rename to docs/static/images/rate-limiter/auto-tuned-write-KBps-series.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/rate-limiter/write-KBps-cdf.png b/docs/static/images/rate-limiter/write-KBps-cdf.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/rate-limiter/write-KBps-cdf.png rename to docs/static/images/rate-limiter/write-KBps-cdf.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/rate-limiter/write-KBps-series.png b/docs/static/images/rate-limiter/write-KBps-series.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/rate-limiter/write-KBps-series.png rename to docs/static/images/rate-limiter/write-KBps-series.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/rocksdb-secondary-cache/Mixgraph_hit_rate.png b/docs/static/images/rocksdb-secondary-cache/Mixgraph_hit_rate.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/rocksdb-secondary-cache/Mixgraph_hit_rate.png rename to docs/static/images/rocksdb-secondary-cache/Mixgraph_hit_rate.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/rocksdb-secondary-cache/Mixgraph_throughput.png b/docs/static/images/rocksdb-secondary-cache/Mixgraph_throughput.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/rocksdb-secondary-cache/Mixgraph_throughput.png rename to docs/static/images/rocksdb-secondary-cache/Mixgraph_throughput.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/rocksdb-secondary-cache/arch_diagram.png b/docs/static/images/rocksdb-secondary-cache/arch_diagram.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/rocksdb-secondary-cache/arch_diagram.png rename to docs/static/images/rocksdb-secondary-cache/arch_diagram.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/rocksdb-secondary-cache/insert_flow.png b/docs/static/images/rocksdb-secondary-cache/insert_flow.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/rocksdb-secondary-cache/insert_flow.png rename to docs/static/images/rocksdb-secondary-cache/insert_flow.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/rocksdb-secondary-cache/lookup_flow.png 
b/docs/static/images/rocksdb-secondary-cache/lookup_flow.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/rocksdb-secondary-cache/lookup_flow.png rename to docs/static/images/rocksdb-secondary-cache/lookup_flow.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/time-aware-tiered-storage/compaction_moving_up_conflict.png b/docs/static/images/time-aware-tiered-storage/compaction_moving_up_conflict.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/time-aware-tiered-storage/compaction_moving_up_conflict.png rename to docs/static/images/time-aware-tiered-storage/compaction_moving_up_conflict.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/time-aware-tiered-storage/per_key_placement_compaction.png b/docs/static/images/time-aware-tiered-storage/per_key_placement_compaction.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/time-aware-tiered-storage/per_key_placement_compaction.png rename to docs/static/images/time-aware-tiered-storage/per_key_placement_compaction.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/time-aware-tiered-storage/tiered_storage_design.png b/docs/static/images/time-aware-tiered-storage/tiered_storage_design.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/time-aware-tiered-storage/tiered_storage_design.png rename to docs/static/images/time-aware-tiered-storage/tiered_storage_design.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/time-aware-tiered-storage/tiered_storage_overview.png b/docs/static/images/time-aware-tiered-storage/tiered_storage_overview.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/time-aware-tiered-storage/tiered_storage_overview.png rename to docs/static/images/time-aware-tiered-storage/tiered_storage_overview.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/time-aware-tiered-storage/tiered_storage_problem.png b/docs/static/images/time-aware-tiered-storage/tiered_storage_problem.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/time-aware-tiered-storage/tiered_storage_problem.png rename to docs/static/images/time-aware-tiered-storage/tiered_storage_problem.png diff --git a/librocksdb-sys/rocksdb/docs/static/images/tree_example1.png b/docs/static/images/tree_example1.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/images/tree_example1.png rename to docs/static/images/tree_example1.png diff --git a/librocksdb-sys/rocksdb/docs/static/logo.svg b/docs/static/logo.svg similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/logo.svg rename to docs/static/logo.svg diff --git a/librocksdb-sys/rocksdb/docs/static/og_image.png b/docs/static/og_image.png similarity index 100% rename from librocksdb-sys/rocksdb/docs/static/og_image.png rename to docs/static/og_image.png diff --git a/librocksdb-sys/rocksdb/env/composite_env.cc b/env/composite_env.cc similarity index 100% rename from librocksdb-sys/rocksdb/env/composite_env.cc rename to env/composite_env.cc diff --git a/librocksdb-sys/rocksdb/env/composite_env_wrapper.h b/env/composite_env_wrapper.h similarity index 100% rename from librocksdb-sys/rocksdb/env/composite_env_wrapper.h rename to env/composite_env_wrapper.h diff --git a/librocksdb-sys/rocksdb/env/emulated_clock.h b/env/emulated_clock.h similarity index 100% rename from librocksdb-sys/rocksdb/env/emulated_clock.h rename to env/emulated_clock.h diff --git a/librocksdb-sys/rocksdb/env/env.cc 
b/env/env.cc similarity index 100% rename from librocksdb-sys/rocksdb/env/env.cc rename to env/env.cc diff --git a/librocksdb-sys/rocksdb/env/env_basic_test.cc b/env/env_basic_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/env/env_basic_test.cc rename to env/env_basic_test.cc diff --git a/librocksdb-sys/rocksdb/env/env_chroot.cc b/env/env_chroot.cc similarity index 100% rename from librocksdb-sys/rocksdb/env/env_chroot.cc rename to env/env_chroot.cc diff --git a/librocksdb-sys/rocksdb/env/env_chroot.h b/env/env_chroot.h similarity index 100% rename from librocksdb-sys/rocksdb/env/env_chroot.h rename to env/env_chroot.h diff --git a/librocksdb-sys/rocksdb/env/env_encryption.cc b/env/env_encryption.cc similarity index 100% rename from librocksdb-sys/rocksdb/env/env_encryption.cc rename to env/env_encryption.cc diff --git a/librocksdb-sys/rocksdb/env/env_encryption_ctr.h b/env/env_encryption_ctr.h similarity index 100% rename from librocksdb-sys/rocksdb/env/env_encryption_ctr.h rename to env/env_encryption_ctr.h diff --git a/librocksdb-sys/rocksdb/env/env_posix.cc b/env/env_posix.cc similarity index 100% rename from librocksdb-sys/rocksdb/env/env_posix.cc rename to env/env_posix.cc diff --git a/librocksdb-sys/rocksdb/env/env_test.cc b/env/env_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/env/env_test.cc rename to env/env_test.cc diff --git a/librocksdb-sys/rocksdb/env/file_system.cc b/env/file_system.cc similarity index 100% rename from librocksdb-sys/rocksdb/env/file_system.cc rename to env/file_system.cc diff --git a/librocksdb-sys/rocksdb/env/file_system_tracer.cc b/env/file_system_tracer.cc similarity index 100% rename from librocksdb-sys/rocksdb/env/file_system_tracer.cc rename to env/file_system_tracer.cc diff --git a/librocksdb-sys/rocksdb/env/file_system_tracer.h b/env/file_system_tracer.h similarity index 100% rename from librocksdb-sys/rocksdb/env/file_system_tracer.h rename to env/file_system_tracer.h diff --git a/librocksdb-sys/rocksdb/env/fs_posix.cc b/env/fs_posix.cc similarity index 100% rename from librocksdb-sys/rocksdb/env/fs_posix.cc rename to env/fs_posix.cc diff --git a/librocksdb-sys/rocksdb/env/fs_readonly.h b/env/fs_readonly.h similarity index 100% rename from librocksdb-sys/rocksdb/env/fs_readonly.h rename to env/fs_readonly.h diff --git a/librocksdb-sys/rocksdb/env/fs_remap.cc b/env/fs_remap.cc similarity index 100% rename from librocksdb-sys/rocksdb/env/fs_remap.cc rename to env/fs_remap.cc diff --git a/librocksdb-sys/rocksdb/env/fs_remap.h b/env/fs_remap.h similarity index 100% rename from librocksdb-sys/rocksdb/env/fs_remap.h rename to env/fs_remap.h diff --git a/librocksdb-sys/rocksdb/env/io_posix.cc b/env/io_posix.cc similarity index 100% rename from librocksdb-sys/rocksdb/env/io_posix.cc rename to env/io_posix.cc diff --git a/librocksdb-sys/rocksdb/env/io_posix.h b/env/io_posix.h similarity index 100% rename from librocksdb-sys/rocksdb/env/io_posix.h rename to env/io_posix.h diff --git a/librocksdb-sys/rocksdb/env/io_posix_test.cc b/env/io_posix_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/env/io_posix_test.cc rename to env/io_posix_test.cc diff --git a/librocksdb-sys/rocksdb/env/mock_env.cc b/env/mock_env.cc similarity index 100% rename from librocksdb-sys/rocksdb/env/mock_env.cc rename to env/mock_env.cc diff --git a/librocksdb-sys/rocksdb/env/mock_env.h b/env/mock_env.h similarity index 100% rename from librocksdb-sys/rocksdb/env/mock_env.h rename to env/mock_env.h diff --git 
a/librocksdb-sys/rocksdb/env/mock_env_test.cc b/env/mock_env_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/env/mock_env_test.cc rename to env/mock_env_test.cc diff --git a/librocksdb-sys/rocksdb/env/unique_id_gen.cc b/env/unique_id_gen.cc similarity index 100% rename from librocksdb-sys/rocksdb/env/unique_id_gen.cc rename to env/unique_id_gen.cc diff --git a/librocksdb-sys/rocksdb/env/unique_id_gen.h b/env/unique_id_gen.h similarity index 100% rename from librocksdb-sys/rocksdb/env/unique_id_gen.h rename to env/unique_id_gen.h diff --git a/librocksdb-sys/rocksdb/examples/.gitignore b/examples/.gitignore similarity index 100% rename from librocksdb-sys/rocksdb/examples/.gitignore rename to examples/.gitignore diff --git a/librocksdb-sys/rocksdb/examples/CMakeLists.txt b/examples/CMakeLists.txt similarity index 100% rename from librocksdb-sys/rocksdb/examples/CMakeLists.txt rename to examples/CMakeLists.txt diff --git a/librocksdb-sys/rocksdb/examples/Makefile b/examples/Makefile similarity index 100% rename from librocksdb-sys/rocksdb/examples/Makefile rename to examples/Makefile diff --git a/librocksdb-sys/rocksdb/examples/README.md b/examples/README.md similarity index 100% rename from librocksdb-sys/rocksdb/examples/README.md rename to examples/README.md diff --git a/librocksdb-sys/rocksdb/examples/c_simple_example.c b/examples/c_simple_example.c similarity index 100% rename from librocksdb-sys/rocksdb/examples/c_simple_example.c rename to examples/c_simple_example.c diff --git a/librocksdb-sys/rocksdb/examples/column_families_example.cc b/examples/column_families_example.cc similarity index 100% rename from librocksdb-sys/rocksdb/examples/column_families_example.cc rename to examples/column_families_example.cc diff --git a/librocksdb-sys/rocksdb/examples/compact_files_example.cc b/examples/compact_files_example.cc similarity index 100% rename from librocksdb-sys/rocksdb/examples/compact_files_example.cc rename to examples/compact_files_example.cc diff --git a/librocksdb-sys/rocksdb/examples/compaction_filter_example.cc b/examples/compaction_filter_example.cc similarity index 100% rename from librocksdb-sys/rocksdb/examples/compaction_filter_example.cc rename to examples/compaction_filter_example.cc diff --git a/librocksdb-sys/rocksdb/examples/multi_processes_example.cc b/examples/multi_processes_example.cc similarity index 100% rename from librocksdb-sys/rocksdb/examples/multi_processes_example.cc rename to examples/multi_processes_example.cc diff --git a/librocksdb-sys/rocksdb/examples/optimistic_transaction_example.cc b/examples/optimistic_transaction_example.cc similarity index 100% rename from librocksdb-sys/rocksdb/examples/optimistic_transaction_example.cc rename to examples/optimistic_transaction_example.cc diff --git a/librocksdb-sys/rocksdb/examples/options_file_example.cc b/examples/options_file_example.cc similarity index 100% rename from librocksdb-sys/rocksdb/examples/options_file_example.cc rename to examples/options_file_example.cc diff --git a/librocksdb-sys/rocksdb/examples/rocksdb_backup_restore_example.cc b/examples/rocksdb_backup_restore_example.cc similarity index 100% rename from librocksdb-sys/rocksdb/examples/rocksdb_backup_restore_example.cc rename to examples/rocksdb_backup_restore_example.cc diff --git a/librocksdb-sys/rocksdb/examples/rocksdb_option_file_example.ini b/examples/rocksdb_option_file_example.ini similarity index 100% rename from librocksdb-sys/rocksdb/examples/rocksdb_option_file_example.ini rename to 
examples/rocksdb_option_file_example.ini diff --git a/librocksdb-sys/rocksdb/examples/simple_example.cc b/examples/simple_example.cc similarity index 100% rename from librocksdb-sys/rocksdb/examples/simple_example.cc rename to examples/simple_example.cc diff --git a/librocksdb-sys/rocksdb/examples/transaction_example.cc b/examples/transaction_example.cc similarity index 100% rename from librocksdb-sys/rocksdb/examples/transaction_example.cc rename to examples/transaction_example.cc diff --git a/librocksdb-sys/rocksdb/file/delete_scheduler.cc b/file/delete_scheduler.cc similarity index 100% rename from librocksdb-sys/rocksdb/file/delete_scheduler.cc rename to file/delete_scheduler.cc diff --git a/librocksdb-sys/rocksdb/file/delete_scheduler.h b/file/delete_scheduler.h similarity index 100% rename from librocksdb-sys/rocksdb/file/delete_scheduler.h rename to file/delete_scheduler.h diff --git a/librocksdb-sys/rocksdb/file/delete_scheduler_test.cc b/file/delete_scheduler_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/file/delete_scheduler_test.cc rename to file/delete_scheduler_test.cc diff --git a/librocksdb-sys/rocksdb/file/file_prefetch_buffer.cc b/file/file_prefetch_buffer.cc similarity index 100% rename from librocksdb-sys/rocksdb/file/file_prefetch_buffer.cc rename to file/file_prefetch_buffer.cc diff --git a/librocksdb-sys/rocksdb/file/file_prefetch_buffer.h b/file/file_prefetch_buffer.h similarity index 100% rename from librocksdb-sys/rocksdb/file/file_prefetch_buffer.h rename to file/file_prefetch_buffer.h diff --git a/librocksdb-sys/rocksdb/file/file_util.cc b/file/file_util.cc similarity index 100% rename from librocksdb-sys/rocksdb/file/file_util.cc rename to file/file_util.cc diff --git a/librocksdb-sys/rocksdb/file/file_util.h b/file/file_util.h similarity index 100% rename from librocksdb-sys/rocksdb/file/file_util.h rename to file/file_util.h diff --git a/librocksdb-sys/rocksdb/file/filename.cc b/file/filename.cc similarity index 100% rename from librocksdb-sys/rocksdb/file/filename.cc rename to file/filename.cc diff --git a/librocksdb-sys/rocksdb/file/filename.h b/file/filename.h similarity index 100% rename from librocksdb-sys/rocksdb/file/filename.h rename to file/filename.h diff --git a/librocksdb-sys/rocksdb/file/line_file_reader.cc b/file/line_file_reader.cc similarity index 100% rename from librocksdb-sys/rocksdb/file/line_file_reader.cc rename to file/line_file_reader.cc diff --git a/librocksdb-sys/rocksdb/file/line_file_reader.h b/file/line_file_reader.h similarity index 100% rename from librocksdb-sys/rocksdb/file/line_file_reader.h rename to file/line_file_reader.h diff --git a/librocksdb-sys/rocksdb/file/prefetch_test.cc b/file/prefetch_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/file/prefetch_test.cc rename to file/prefetch_test.cc diff --git a/librocksdb-sys/rocksdb/file/random_access_file_reader.cc b/file/random_access_file_reader.cc similarity index 100% rename from librocksdb-sys/rocksdb/file/random_access_file_reader.cc rename to file/random_access_file_reader.cc diff --git a/librocksdb-sys/rocksdb/file/random_access_file_reader.h b/file/random_access_file_reader.h similarity index 100% rename from librocksdb-sys/rocksdb/file/random_access_file_reader.h rename to file/random_access_file_reader.h diff --git a/librocksdb-sys/rocksdb/file/random_access_file_reader_test.cc b/file/random_access_file_reader_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/file/random_access_file_reader_test.cc rename to 
file/random_access_file_reader_test.cc diff --git a/librocksdb-sys/rocksdb/file/read_write_util.cc b/file/read_write_util.cc similarity index 100% rename from librocksdb-sys/rocksdb/file/read_write_util.cc rename to file/read_write_util.cc diff --git a/librocksdb-sys/rocksdb/file/read_write_util.h b/file/read_write_util.h similarity index 100% rename from librocksdb-sys/rocksdb/file/read_write_util.h rename to file/read_write_util.h diff --git a/librocksdb-sys/rocksdb/file/readahead_file_info.h b/file/readahead_file_info.h similarity index 100% rename from librocksdb-sys/rocksdb/file/readahead_file_info.h rename to file/readahead_file_info.h diff --git a/librocksdb-sys/rocksdb/file/readahead_raf.cc b/file/readahead_raf.cc similarity index 100% rename from librocksdb-sys/rocksdb/file/readahead_raf.cc rename to file/readahead_raf.cc diff --git a/librocksdb-sys/rocksdb/file/readahead_raf.h b/file/readahead_raf.h similarity index 100% rename from librocksdb-sys/rocksdb/file/readahead_raf.h rename to file/readahead_raf.h diff --git a/librocksdb-sys/rocksdb/file/sequence_file_reader.cc b/file/sequence_file_reader.cc similarity index 100% rename from librocksdb-sys/rocksdb/file/sequence_file_reader.cc rename to file/sequence_file_reader.cc diff --git a/librocksdb-sys/rocksdb/file/sequence_file_reader.h b/file/sequence_file_reader.h similarity index 100% rename from librocksdb-sys/rocksdb/file/sequence_file_reader.h rename to file/sequence_file_reader.h diff --git a/librocksdb-sys/rocksdb/file/sst_file_manager_impl.cc b/file/sst_file_manager_impl.cc similarity index 100% rename from librocksdb-sys/rocksdb/file/sst_file_manager_impl.cc rename to file/sst_file_manager_impl.cc diff --git a/librocksdb-sys/rocksdb/file/sst_file_manager_impl.h b/file/sst_file_manager_impl.h similarity index 100% rename from librocksdb-sys/rocksdb/file/sst_file_manager_impl.h rename to file/sst_file_manager_impl.h diff --git a/librocksdb-sys/rocksdb/file/writable_file_writer.cc b/file/writable_file_writer.cc similarity index 100% rename from librocksdb-sys/rocksdb/file/writable_file_writer.cc rename to file/writable_file_writer.cc diff --git a/librocksdb-sys/rocksdb/file/writable_file_writer.h b/file/writable_file_writer.h similarity index 100% rename from librocksdb-sys/rocksdb/file/writable_file_writer.h rename to file/writable_file_writer.h diff --git a/librocksdb-sys/rocksdb/fuzz/.gitignore b/fuzz/.gitignore similarity index 100% rename from librocksdb-sys/rocksdb/fuzz/.gitignore rename to fuzz/.gitignore diff --git a/librocksdb-sys/rocksdb/fuzz/Makefile b/fuzz/Makefile similarity index 100% rename from librocksdb-sys/rocksdb/fuzz/Makefile rename to fuzz/Makefile diff --git a/librocksdb-sys/rocksdb/fuzz/README.md b/fuzz/README.md similarity index 100% rename from librocksdb-sys/rocksdb/fuzz/README.md rename to fuzz/README.md diff --git a/librocksdb-sys/rocksdb/fuzz/db_fuzzer.cc b/fuzz/db_fuzzer.cc similarity index 100% rename from librocksdb-sys/rocksdb/fuzz/db_fuzzer.cc rename to fuzz/db_fuzzer.cc diff --git a/librocksdb-sys/rocksdb/fuzz/db_map_fuzzer.cc b/fuzz/db_map_fuzzer.cc similarity index 100% rename from librocksdb-sys/rocksdb/fuzz/db_map_fuzzer.cc rename to fuzz/db_map_fuzzer.cc diff --git a/librocksdb-sys/rocksdb/fuzz/proto/db_operation.proto b/fuzz/proto/db_operation.proto similarity index 100% rename from librocksdb-sys/rocksdb/fuzz/proto/db_operation.proto rename to fuzz/proto/db_operation.proto diff --git a/librocksdb-sys/rocksdb/fuzz/sst_file_writer_fuzzer.cc b/fuzz/sst_file_writer_fuzzer.cc similarity 
index 100% rename from librocksdb-sys/rocksdb/fuzz/sst_file_writer_fuzzer.cc rename to fuzz/sst_file_writer_fuzzer.cc diff --git a/librocksdb-sys/rocksdb/fuzz/util.h b/fuzz/util.h similarity index 100% rename from librocksdb-sys/rocksdb/fuzz/util.h rename to fuzz/util.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/advanced_cache.h b/include/rocksdb/advanced_cache.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/advanced_cache.h rename to include/rocksdb/advanced_cache.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/advanced_options.h b/include/rocksdb/advanced_options.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/advanced_options.h rename to include/rocksdb/advanced_options.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/block_cache_trace_writer.h b/include/rocksdb/block_cache_trace_writer.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/block_cache_trace_writer.h rename to include/rocksdb/block_cache_trace_writer.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/c.h b/include/rocksdb/c.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/c.h rename to include/rocksdb/c.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/cache.h b/include/rocksdb/cache.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/cache.h rename to include/rocksdb/cache.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/cache_bench_tool.h b/include/rocksdb/cache_bench_tool.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/cache_bench_tool.h rename to include/rocksdb/cache_bench_tool.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/cleanable.h b/include/rocksdb/cleanable.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/cleanable.h rename to include/rocksdb/cleanable.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/compaction_filter.h b/include/rocksdb/compaction_filter.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/compaction_filter.h rename to include/rocksdb/compaction_filter.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/compaction_job_stats.h b/include/rocksdb/compaction_job_stats.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/compaction_job_stats.h rename to include/rocksdb/compaction_job_stats.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/comparator.h b/include/rocksdb/comparator.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/comparator.h rename to include/rocksdb/comparator.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/compression_type.h b/include/rocksdb/compression_type.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/compression_type.h rename to include/rocksdb/compression_type.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/concurrent_task_limiter.h b/include/rocksdb/concurrent_task_limiter.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/concurrent_task_limiter.h rename to include/rocksdb/concurrent_task_limiter.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/configurable.h b/include/rocksdb/configurable.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/configurable.h rename to include/rocksdb/configurable.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/convenience.h b/include/rocksdb/convenience.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/convenience.h rename to 
include/rocksdb/convenience.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/customizable.h b/include/rocksdb/customizable.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/customizable.h rename to include/rocksdb/customizable.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/data_structure.h b/include/rocksdb/data_structure.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/data_structure.h rename to include/rocksdb/data_structure.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/db.h b/include/rocksdb/db.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/db.h rename to include/rocksdb/db.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/db_bench_tool.h b/include/rocksdb/db_bench_tool.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/db_bench_tool.h rename to include/rocksdb/db_bench_tool.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/db_dump_tool.h b/include/rocksdb/db_dump_tool.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/db_dump_tool.h rename to include/rocksdb/db_dump_tool.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/db_stress_tool.h b/include/rocksdb/db_stress_tool.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/db_stress_tool.h rename to include/rocksdb/db_stress_tool.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/env.h b/include/rocksdb/env.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/env.h rename to include/rocksdb/env.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/env_encryption.h b/include/rocksdb/env_encryption.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/env_encryption.h rename to include/rocksdb/env_encryption.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/experimental.h b/include/rocksdb/experimental.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/experimental.h rename to include/rocksdb/experimental.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/file_checksum.h b/include/rocksdb/file_checksum.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/file_checksum.h rename to include/rocksdb/file_checksum.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/file_system.h b/include/rocksdb/file_system.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/file_system.h rename to include/rocksdb/file_system.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/filter_policy.h b/include/rocksdb/filter_policy.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/filter_policy.h rename to include/rocksdb/filter_policy.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/flush_block_policy.h b/include/rocksdb/flush_block_policy.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/flush_block_policy.h rename to include/rocksdb/flush_block_policy.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/functor_wrapper.h b/include/rocksdb/functor_wrapper.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/functor_wrapper.h rename to include/rocksdb/functor_wrapper.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/io_status.h b/include/rocksdb/io_status.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/io_status.h rename to include/rocksdb/io_status.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/iostats_context.h b/include/rocksdb/iostats_context.h 
similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/iostats_context.h rename to include/rocksdb/iostats_context.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/iterator.h b/include/rocksdb/iterator.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/iterator.h rename to include/rocksdb/iterator.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/ldb_tool.h b/include/rocksdb/ldb_tool.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/ldb_tool.h rename to include/rocksdb/ldb_tool.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/listener.h b/include/rocksdb/listener.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/listener.h rename to include/rocksdb/listener.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/memory_allocator.h b/include/rocksdb/memory_allocator.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/memory_allocator.h rename to include/rocksdb/memory_allocator.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/memtablerep.h b/include/rocksdb/memtablerep.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/memtablerep.h rename to include/rocksdb/memtablerep.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/merge_operator.h b/include/rocksdb/merge_operator.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/merge_operator.h rename to include/rocksdb/merge_operator.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/metadata.h b/include/rocksdb/metadata.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/metadata.h rename to include/rocksdb/metadata.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/options.h b/include/rocksdb/options.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/options.h rename to include/rocksdb/options.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/perf_context.h b/include/rocksdb/perf_context.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/perf_context.h rename to include/rocksdb/perf_context.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/perf_level.h b/include/rocksdb/perf_level.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/perf_level.h rename to include/rocksdb/perf_level.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/persistent_cache.h b/include/rocksdb/persistent_cache.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/persistent_cache.h rename to include/rocksdb/persistent_cache.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/port_defs.h b/include/rocksdb/port_defs.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/port_defs.h rename to include/rocksdb/port_defs.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/rate_limiter.h b/include/rocksdb/rate_limiter.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/rate_limiter.h rename to include/rocksdb/rate_limiter.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/rocksdb_namespace.h b/include/rocksdb/rocksdb_namespace.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/rocksdb_namespace.h rename to include/rocksdb/rocksdb_namespace.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/secondary_cache.h b/include/rocksdb/secondary_cache.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/secondary_cache.h rename to include/rocksdb/secondary_cache.h diff --git 
a/librocksdb-sys/rocksdb/include/rocksdb/slice.h b/include/rocksdb/slice.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/slice.h rename to include/rocksdb/slice.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/slice_transform.h b/include/rocksdb/slice_transform.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/slice_transform.h rename to include/rocksdb/slice_transform.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/snapshot.h b/include/rocksdb/snapshot.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/snapshot.h rename to include/rocksdb/snapshot.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/sst_dump_tool.h b/include/rocksdb/sst_dump_tool.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/sst_dump_tool.h rename to include/rocksdb/sst_dump_tool.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/sst_file_manager.h b/include/rocksdb/sst_file_manager.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/sst_file_manager.h rename to include/rocksdb/sst_file_manager.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/sst_file_reader.h b/include/rocksdb/sst_file_reader.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/sst_file_reader.h rename to include/rocksdb/sst_file_reader.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/sst_file_writer.h b/include/rocksdb/sst_file_writer.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/sst_file_writer.h rename to include/rocksdb/sst_file_writer.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/sst_partitioner.h b/include/rocksdb/sst_partitioner.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/sst_partitioner.h rename to include/rocksdb/sst_partitioner.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/statistics.h b/include/rocksdb/statistics.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/statistics.h rename to include/rocksdb/statistics.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/stats_history.h b/include/rocksdb/stats_history.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/stats_history.h rename to include/rocksdb/stats_history.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/status.h b/include/rocksdb/status.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/status.h rename to include/rocksdb/status.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/system_clock.h b/include/rocksdb/system_clock.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/system_clock.h rename to include/rocksdb/system_clock.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/table.h b/include/rocksdb/table.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/table.h rename to include/rocksdb/table.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/table_properties.h b/include/rocksdb/table_properties.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/table_properties.h rename to include/rocksdb/table_properties.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/table_reader_caller.h b/include/rocksdb/table_reader_caller.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/table_reader_caller.h rename to include/rocksdb/table_reader_caller.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/thread_status.h b/include/rocksdb/thread_status.h similarity index 
100% rename from librocksdb-sys/rocksdb/include/rocksdb/thread_status.h rename to include/rocksdb/thread_status.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/threadpool.h b/include/rocksdb/threadpool.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/threadpool.h rename to include/rocksdb/threadpool.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/trace_reader_writer.h b/include/rocksdb/trace_reader_writer.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/trace_reader_writer.h rename to include/rocksdb/trace_reader_writer.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/trace_record.h b/include/rocksdb/trace_record.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/trace_record.h rename to include/rocksdb/trace_record.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/trace_record_result.h b/include/rocksdb/trace_record_result.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/trace_record_result.h rename to include/rocksdb/trace_record_result.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/transaction_log.h b/include/rocksdb/transaction_log.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/transaction_log.h rename to include/rocksdb/transaction_log.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/types.h b/include/rocksdb/types.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/types.h rename to include/rocksdb/types.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/unique_id.h b/include/rocksdb/unique_id.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/unique_id.h rename to include/rocksdb/unique_id.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/universal_compaction.h b/include/rocksdb/universal_compaction.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/universal_compaction.h rename to include/rocksdb/universal_compaction.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/agg_merge.h b/include/rocksdb/utilities/agg_merge.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/agg_merge.h rename to include/rocksdb/utilities/agg_merge.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/backup_engine.h b/include/rocksdb/utilities/backup_engine.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/backup_engine.h rename to include/rocksdb/utilities/backup_engine.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/cache_dump_load.h b/include/rocksdb/utilities/cache_dump_load.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/cache_dump_load.h rename to include/rocksdb/utilities/cache_dump_load.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/checkpoint.h b/include/rocksdb/utilities/checkpoint.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/checkpoint.h rename to include/rocksdb/utilities/checkpoint.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/convenience.h b/include/rocksdb/utilities/convenience.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/convenience.h rename to include/rocksdb/utilities/convenience.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/customizable_util.h b/include/rocksdb/utilities/customizable_util.h similarity index 100% rename from 
librocksdb-sys/rocksdb/include/rocksdb/utilities/customizable_util.h rename to include/rocksdb/utilities/customizable_util.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/db_ttl.h b/include/rocksdb/utilities/db_ttl.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/db_ttl.h rename to include/rocksdb/utilities/db_ttl.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/debug.h b/include/rocksdb/utilities/debug.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/debug.h rename to include/rocksdb/utilities/debug.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/env_mirror.h b/include/rocksdb/utilities/env_mirror.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/env_mirror.h rename to include/rocksdb/utilities/env_mirror.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/info_log_finder.h b/include/rocksdb/utilities/info_log_finder.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/info_log_finder.h rename to include/rocksdb/utilities/info_log_finder.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/ldb_cmd.h b/include/rocksdb/utilities/ldb_cmd.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/ldb_cmd.h rename to include/rocksdb/utilities/ldb_cmd.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/ldb_cmd_execute_result.h b/include/rocksdb/utilities/ldb_cmd_execute_result.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/ldb_cmd_execute_result.h rename to include/rocksdb/utilities/ldb_cmd_execute_result.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/leveldb_options.h b/include/rocksdb/utilities/leveldb_options.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/leveldb_options.h rename to include/rocksdb/utilities/leveldb_options.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/lua/rocks_lua_custom_library.h b/include/rocksdb/utilities/lua/rocks_lua_custom_library.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/lua/rocks_lua_custom_library.h rename to include/rocksdb/utilities/lua/rocks_lua_custom_library.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/lua/rocks_lua_util.h b/include/rocksdb/utilities/lua/rocks_lua_util.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/lua/rocks_lua_util.h rename to include/rocksdb/utilities/lua/rocks_lua_util.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/memory_util.h b/include/rocksdb/utilities/memory_util.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/memory_util.h rename to include/rocksdb/utilities/memory_util.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/object_registry.h b/include/rocksdb/utilities/object_registry.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/object_registry.h rename to include/rocksdb/utilities/object_registry.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/optimistic_transaction_db.h b/include/rocksdb/utilities/optimistic_transaction_db.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/optimistic_transaction_db.h rename to include/rocksdb/utilities/optimistic_transaction_db.h diff --git 
a/librocksdb-sys/rocksdb/include/rocksdb/utilities/option_change_migration.h b/include/rocksdb/utilities/option_change_migration.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/option_change_migration.h rename to include/rocksdb/utilities/option_change_migration.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/options_type.h b/include/rocksdb/utilities/options_type.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/options_type.h rename to include/rocksdb/utilities/options_type.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/options_util.h b/include/rocksdb/utilities/options_util.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/options_util.h rename to include/rocksdb/utilities/options_util.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/replayer.h b/include/rocksdb/utilities/replayer.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/replayer.h rename to include/rocksdb/utilities/replayer.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/sim_cache.h b/include/rocksdb/utilities/sim_cache.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/sim_cache.h rename to include/rocksdb/utilities/sim_cache.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/stackable_db.h b/include/rocksdb/utilities/stackable_db.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/stackable_db.h rename to include/rocksdb/utilities/stackable_db.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/table_properties_collectors.h b/include/rocksdb/utilities/table_properties_collectors.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/table_properties_collectors.h rename to include/rocksdb/utilities/table_properties_collectors.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/transaction.h b/include/rocksdb/utilities/transaction.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/transaction.h rename to include/rocksdb/utilities/transaction.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/transaction_db.h b/include/rocksdb/utilities/transaction_db.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/transaction_db.h rename to include/rocksdb/utilities/transaction_db.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/transaction_db_mutex.h b/include/rocksdb/utilities/transaction_db_mutex.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/transaction_db_mutex.h rename to include/rocksdb/utilities/transaction_db_mutex.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/utilities/write_batch_with_index.h b/include/rocksdb/utilities/write_batch_with_index.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/utilities/write_batch_with_index.h rename to include/rocksdb/utilities/write_batch_with_index.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/version.h b/include/rocksdb/version.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/version.h rename to include/rocksdb/version.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/wal_filter.h b/include/rocksdb/wal_filter.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/wal_filter.h rename to include/rocksdb/wal_filter.h diff --git 
a/librocksdb-sys/rocksdb/include/rocksdb/wide_columns.h b/include/rocksdb/wide_columns.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/wide_columns.h rename to include/rocksdb/wide_columns.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/write_batch.h b/include/rocksdb/write_batch.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/write_batch.h rename to include/rocksdb/write_batch.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/write_batch_base.h b/include/rocksdb/write_batch_base.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/write_batch_base.h rename to include/rocksdb/write_batch_base.h diff --git a/librocksdb-sys/rocksdb/include/rocksdb/write_buffer_manager.h b/include/rocksdb/write_buffer_manager.h similarity index 100% rename from librocksdb-sys/rocksdb/include/rocksdb/write_buffer_manager.h rename to include/rocksdb/write_buffer_manager.h diff --git a/librocksdb-sys/rocksdb/issue_template.md b/issue_template.md similarity index 100% rename from librocksdb-sys/rocksdb/issue_template.md rename to issue_template.md diff --git a/librocksdb-sys/rocksdb/java/CMakeLists.txt b/java/CMakeLists.txt similarity index 100% rename from librocksdb-sys/rocksdb/java/CMakeLists.txt rename to java/CMakeLists.txt diff --git a/librocksdb-sys/rocksdb/java/GetBenchmarks.md b/java/GetBenchmarks.md similarity index 100% rename from librocksdb-sys/rocksdb/java/GetBenchmarks.md rename to java/GetBenchmarks.md diff --git a/librocksdb-sys/rocksdb/java/HISTORY-JAVA.md b/java/HISTORY-JAVA.md similarity index 100% rename from librocksdb-sys/rocksdb/java/HISTORY-JAVA.md rename to java/HISTORY-JAVA.md diff --git a/librocksdb-sys/rocksdb/java/Makefile b/java/Makefile similarity index 100% rename from librocksdb-sys/rocksdb/java/Makefile rename to java/Makefile diff --git a/librocksdb-sys/rocksdb/java/RELEASE.md b/java/RELEASE.md similarity index 100% rename from librocksdb-sys/rocksdb/java/RELEASE.md rename to java/RELEASE.md diff --git a/librocksdb-sys/rocksdb/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java b/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java similarity index 100% rename from librocksdb-sys/rocksdb/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java rename to java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java diff --git a/librocksdb-sys/rocksdb/java/crossbuild/Vagrantfile b/java/crossbuild/Vagrantfile similarity index 100% rename from librocksdb-sys/rocksdb/java/crossbuild/Vagrantfile rename to java/crossbuild/Vagrantfile diff --git a/librocksdb-sys/rocksdb/java/crossbuild/build-linux-alpine.sh b/java/crossbuild/build-linux-alpine.sh similarity index 100% rename from librocksdb-sys/rocksdb/java/crossbuild/build-linux-alpine.sh rename to java/crossbuild/build-linux-alpine.sh diff --git a/librocksdb-sys/rocksdb/java/crossbuild/build-linux-centos.sh b/java/crossbuild/build-linux-centos.sh similarity index 100% rename from librocksdb-sys/rocksdb/java/crossbuild/build-linux-centos.sh rename to java/crossbuild/build-linux-centos.sh diff --git a/librocksdb-sys/rocksdb/java/crossbuild/build-linux.sh b/java/crossbuild/build-linux.sh similarity index 100% rename from librocksdb-sys/rocksdb/java/crossbuild/build-linux.sh rename to java/crossbuild/build-linux.sh diff --git a/librocksdb-sys/rocksdb/java/crossbuild/docker-build-linux-alpine.sh b/java/crossbuild/docker-build-linux-alpine.sh similarity index 100% rename from 
librocksdb-sys/rocksdb/java/crossbuild/docker-build-linux-alpine.sh rename to java/crossbuild/docker-build-linux-alpine.sh diff --git a/librocksdb-sys/rocksdb/java/crossbuild/docker-build-linux-centos.sh b/java/crossbuild/docker-build-linux-centos.sh similarity index 100% rename from librocksdb-sys/rocksdb/java/crossbuild/docker-build-linux-centos.sh rename to java/crossbuild/docker-build-linux-centos.sh diff --git a/librocksdb-sys/rocksdb/java/jdb_bench.sh b/java/jdb_bench.sh similarity index 100% rename from librocksdb-sys/rocksdb/java/jdb_bench.sh rename to java/jdb_bench.sh diff --git a/librocksdb-sys/rocksdb/java/jmh/LICENSE-HEADER.txt b/java/jmh/LICENSE-HEADER.txt similarity index 100% rename from librocksdb-sys/rocksdb/java/jmh/LICENSE-HEADER.txt rename to java/jmh/LICENSE-HEADER.txt diff --git a/librocksdb-sys/rocksdb/java/jmh/README.md b/java/jmh/README.md similarity index 100% rename from librocksdb-sys/rocksdb/java/jmh/README.md rename to java/jmh/README.md diff --git a/librocksdb-sys/rocksdb/java/jmh/pom.xml b/java/jmh/pom.xml similarity index 100% rename from librocksdb-sys/rocksdb/java/jmh/pom.xml rename to java/jmh/pom.xml diff --git a/librocksdb-sys/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/ComparatorBenchmarks.java b/java/jmh/src/main/java/org/rocksdb/jmh/ComparatorBenchmarks.java similarity index 100% rename from librocksdb-sys/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/ComparatorBenchmarks.java rename to java/jmh/src/main/java/org/rocksdb/jmh/ComparatorBenchmarks.java diff --git a/librocksdb-sys/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/GetBenchmarks.java b/java/jmh/src/main/java/org/rocksdb/jmh/GetBenchmarks.java similarity index 100% rename from librocksdb-sys/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/GetBenchmarks.java rename to java/jmh/src/main/java/org/rocksdb/jmh/GetBenchmarks.java diff --git a/librocksdb-sys/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/MultiGetBenchmarks.java b/java/jmh/src/main/java/org/rocksdb/jmh/MultiGetBenchmarks.java similarity index 100% rename from librocksdb-sys/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/MultiGetBenchmarks.java rename to java/jmh/src/main/java/org/rocksdb/jmh/MultiGetBenchmarks.java diff --git a/librocksdb-sys/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/PutBenchmarks.java b/java/jmh/src/main/java/org/rocksdb/jmh/PutBenchmarks.java similarity index 100% rename from librocksdb-sys/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/PutBenchmarks.java rename to java/jmh/src/main/java/org/rocksdb/jmh/PutBenchmarks.java diff --git a/librocksdb-sys/rocksdb/java/jmh/src/main/java/org/rocksdb/util/FileUtils.java b/java/jmh/src/main/java/org/rocksdb/util/FileUtils.java similarity index 100% rename from librocksdb-sys/rocksdb/java/jmh/src/main/java/org/rocksdb/util/FileUtils.java rename to java/jmh/src/main/java/org/rocksdb/util/FileUtils.java diff --git a/librocksdb-sys/rocksdb/java/jmh/src/main/java/org/rocksdb/util/KVUtils.java b/java/jmh/src/main/java/org/rocksdb/util/KVUtils.java similarity index 100% rename from librocksdb-sys/rocksdb/java/jmh/src/main/java/org/rocksdb/util/KVUtils.java rename to java/jmh/src/main/java/org/rocksdb/util/KVUtils.java diff --git a/librocksdb-sys/rocksdb/java/pom.xml.template b/java/pom.xml.template similarity index 100% rename from librocksdb-sys/rocksdb/java/pom.xml.template rename to java/pom.xml.template diff --git a/librocksdb-sys/rocksdb/java/rocksjni/backup_engine_options.cc b/java/rocksjni/backup_engine_options.cc similarity index 100% rename from 
librocksdb-sys/rocksdb/java/rocksjni/backup_engine_options.cc rename to java/rocksjni/backup_engine_options.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/backupenginejni.cc b/java/rocksjni/backupenginejni.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/backupenginejni.cc rename to java/rocksjni/backupenginejni.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/cache.cc b/java/rocksjni/cache.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/cache.cc rename to java/rocksjni/cache.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/cassandra_compactionfilterjni.cc b/java/rocksjni/cassandra_compactionfilterjni.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/cassandra_compactionfilterjni.cc rename to java/rocksjni/cassandra_compactionfilterjni.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/cassandra_value_operator.cc b/java/rocksjni/cassandra_value_operator.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/cassandra_value_operator.cc rename to java/rocksjni/cassandra_value_operator.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/checkpoint.cc b/java/rocksjni/checkpoint.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/checkpoint.cc rename to java/rocksjni/checkpoint.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/clock_cache.cc b/java/rocksjni/clock_cache.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/clock_cache.cc rename to java/rocksjni/clock_cache.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/columnfamilyhandle.cc b/java/rocksjni/columnfamilyhandle.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/columnfamilyhandle.cc rename to java/rocksjni/columnfamilyhandle.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/compact_range_options.cc b/java/rocksjni/compact_range_options.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/compact_range_options.cc rename to java/rocksjni/compact_range_options.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/compaction_filter.cc b/java/rocksjni/compaction_filter.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/compaction_filter.cc rename to java/rocksjni/compaction_filter.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/compaction_filter_factory.cc b/java/rocksjni/compaction_filter_factory.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/compaction_filter_factory.cc rename to java/rocksjni/compaction_filter_factory.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.cc b/java/rocksjni/compaction_filter_factory_jnicallback.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.cc rename to java/rocksjni/compaction_filter_factory_jnicallback.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.h b/java/rocksjni/compaction_filter_factory_jnicallback.h similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.h rename to java/rocksjni/compaction_filter_factory_jnicallback.h diff --git a/librocksdb-sys/rocksdb/java/rocksjni/compaction_job_info.cc b/java/rocksjni/compaction_job_info.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/compaction_job_info.cc rename to java/rocksjni/compaction_job_info.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/compaction_job_stats.cc 
b/java/rocksjni/compaction_job_stats.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/compaction_job_stats.cc rename to java/rocksjni/compaction_job_stats.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/compaction_options.cc b/java/rocksjni/compaction_options.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/compaction_options.cc rename to java/rocksjni/compaction_options.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/compaction_options_fifo.cc b/java/rocksjni/compaction_options_fifo.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/compaction_options_fifo.cc rename to java/rocksjni/compaction_options_fifo.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/compaction_options_universal.cc b/java/rocksjni/compaction_options_universal.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/compaction_options_universal.cc rename to java/rocksjni/compaction_options_universal.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/comparator.cc b/java/rocksjni/comparator.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/comparator.cc rename to java/rocksjni/comparator.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/comparatorjnicallback.cc b/java/rocksjni/comparatorjnicallback.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/comparatorjnicallback.cc rename to java/rocksjni/comparatorjnicallback.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/comparatorjnicallback.h b/java/rocksjni/comparatorjnicallback.h similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/comparatorjnicallback.h rename to java/rocksjni/comparatorjnicallback.h diff --git a/librocksdb-sys/rocksdb/java/rocksjni/compression_options.cc b/java/rocksjni/compression_options.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/compression_options.cc rename to java/rocksjni/compression_options.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/concurrent_task_limiter.cc b/java/rocksjni/concurrent_task_limiter.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/concurrent_task_limiter.cc rename to java/rocksjni/concurrent_task_limiter.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/config_options.cc b/java/rocksjni/config_options.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/config_options.cc rename to java/rocksjni/config_options.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/cplusplus_to_java_convert.h b/java/rocksjni/cplusplus_to_java_convert.h similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/cplusplus_to_java_convert.h rename to java/rocksjni/cplusplus_to_java_convert.h diff --git a/librocksdb-sys/rocksdb/java/rocksjni/env.cc b/java/rocksjni/env.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/env.cc rename to java/rocksjni/env.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/env_options.cc b/java/rocksjni/env_options.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/env_options.cc rename to java/rocksjni/env_options.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/event_listener.cc b/java/rocksjni/event_listener.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/event_listener.cc rename to java/rocksjni/event_listener.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/event_listener_jnicallback.cc b/java/rocksjni/event_listener_jnicallback.cc similarity index 100% rename 
from librocksdb-sys/rocksdb/java/rocksjni/event_listener_jnicallback.cc rename to java/rocksjni/event_listener_jnicallback.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/event_listener_jnicallback.h b/java/rocksjni/event_listener_jnicallback.h similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/event_listener_jnicallback.h rename to java/rocksjni/event_listener_jnicallback.h diff --git a/librocksdb-sys/rocksdb/java/rocksjni/filter.cc b/java/rocksjni/filter.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/filter.cc rename to java/rocksjni/filter.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/ingest_external_file_options.cc b/java/rocksjni/ingest_external_file_options.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/ingest_external_file_options.cc rename to java/rocksjni/ingest_external_file_options.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/iterator.cc b/java/rocksjni/iterator.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/iterator.cc rename to java/rocksjni/iterator.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/jnicallback.cc b/java/rocksjni/jnicallback.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/jnicallback.cc rename to java/rocksjni/jnicallback.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/jnicallback.h b/java/rocksjni/jnicallback.h similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/jnicallback.h rename to java/rocksjni/jnicallback.h diff --git a/librocksdb-sys/rocksdb/java/rocksjni/loggerjnicallback.cc b/java/rocksjni/loggerjnicallback.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/loggerjnicallback.cc rename to java/rocksjni/loggerjnicallback.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/loggerjnicallback.h b/java/rocksjni/loggerjnicallback.h similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/loggerjnicallback.h rename to java/rocksjni/loggerjnicallback.h diff --git a/librocksdb-sys/rocksdb/java/rocksjni/lru_cache.cc b/java/rocksjni/lru_cache.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/lru_cache.cc rename to java/rocksjni/lru_cache.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/memory_util.cc b/java/rocksjni/memory_util.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/memory_util.cc rename to java/rocksjni/memory_util.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/memtablejni.cc b/java/rocksjni/memtablejni.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/memtablejni.cc rename to java/rocksjni/memtablejni.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/merge_operator.cc b/java/rocksjni/merge_operator.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/merge_operator.cc rename to java/rocksjni/merge_operator.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/native_comparator_wrapper_test.cc b/java/rocksjni/native_comparator_wrapper_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/native_comparator_wrapper_test.cc rename to java/rocksjni/native_comparator_wrapper_test.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/optimistic_transaction_db.cc b/java/rocksjni/optimistic_transaction_db.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/optimistic_transaction_db.cc rename to java/rocksjni/optimistic_transaction_db.cc diff --git 
a/librocksdb-sys/rocksdb/java/rocksjni/optimistic_transaction_options.cc b/java/rocksjni/optimistic_transaction_options.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/optimistic_transaction_options.cc rename to java/rocksjni/optimistic_transaction_options.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/options.cc b/java/rocksjni/options.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/options.cc rename to java/rocksjni/options.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/options_util.cc b/java/rocksjni/options_util.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/options_util.cc rename to java/rocksjni/options_util.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/persistent_cache.cc b/java/rocksjni/persistent_cache.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/persistent_cache.cc rename to java/rocksjni/persistent_cache.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/portal.h b/java/rocksjni/portal.h similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/portal.h rename to java/rocksjni/portal.h diff --git a/librocksdb-sys/rocksdb/java/rocksjni/ratelimiterjni.cc b/java/rocksjni/ratelimiterjni.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/ratelimiterjni.cc rename to java/rocksjni/ratelimiterjni.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc b/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc rename to java/rocksjni/remove_emptyvalue_compactionfilterjni.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/restorejni.cc b/java/rocksjni/restorejni.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/restorejni.cc rename to java/rocksjni/restorejni.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/rocks_callback_object.cc b/java/rocksjni/rocks_callback_object.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/rocks_callback_object.cc rename to java/rocksjni/rocks_callback_object.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/rocksdb_exception_test.cc b/java/rocksjni/rocksdb_exception_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/rocksdb_exception_test.cc rename to java/rocksjni/rocksdb_exception_test.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/rocksjni.cc b/java/rocksjni/rocksjni.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/rocksjni.cc rename to java/rocksjni/rocksjni.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/slice.cc b/java/rocksjni/slice.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/slice.cc rename to java/rocksjni/slice.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/snapshot.cc b/java/rocksjni/snapshot.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/snapshot.cc rename to java/rocksjni/snapshot.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/sst_file_manager.cc b/java/rocksjni/sst_file_manager.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/sst_file_manager.cc rename to java/rocksjni/sst_file_manager.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/sst_file_reader_iterator.cc b/java/rocksjni/sst_file_reader_iterator.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/sst_file_reader_iterator.cc rename to 
java/rocksjni/sst_file_reader_iterator.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/sst_file_readerjni.cc b/java/rocksjni/sst_file_readerjni.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/sst_file_readerjni.cc rename to java/rocksjni/sst_file_readerjni.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/sst_file_writerjni.cc b/java/rocksjni/sst_file_writerjni.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/sst_file_writerjni.cc rename to java/rocksjni/sst_file_writerjni.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/sst_partitioner.cc b/java/rocksjni/sst_partitioner.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/sst_partitioner.cc rename to java/rocksjni/sst_partitioner.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/statistics.cc b/java/rocksjni/statistics.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/statistics.cc rename to java/rocksjni/statistics.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/statisticsjni.cc b/java/rocksjni/statisticsjni.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/statisticsjni.cc rename to java/rocksjni/statisticsjni.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/statisticsjni.h b/java/rocksjni/statisticsjni.h similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/statisticsjni.h rename to java/rocksjni/statisticsjni.h diff --git a/librocksdb-sys/rocksdb/java/rocksjni/table.cc b/java/rocksjni/table.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/table.cc rename to java/rocksjni/table.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/table_filter.cc b/java/rocksjni/table_filter.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/table_filter.cc rename to java/rocksjni/table_filter.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/table_filter_jnicallback.cc b/java/rocksjni/table_filter_jnicallback.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/table_filter_jnicallback.cc rename to java/rocksjni/table_filter_jnicallback.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/table_filter_jnicallback.h b/java/rocksjni/table_filter_jnicallback.h similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/table_filter_jnicallback.h rename to java/rocksjni/table_filter_jnicallback.h diff --git a/librocksdb-sys/rocksdb/java/rocksjni/testable_event_listener.cc b/java/rocksjni/testable_event_listener.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/testable_event_listener.cc rename to java/rocksjni/testable_event_listener.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/thread_status.cc b/java/rocksjni/thread_status.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/thread_status.cc rename to java/rocksjni/thread_status.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/trace_writer.cc b/java/rocksjni/trace_writer.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/trace_writer.cc rename to java/rocksjni/trace_writer.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/trace_writer_jnicallback.cc b/java/rocksjni/trace_writer_jnicallback.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/trace_writer_jnicallback.cc rename to java/rocksjni/trace_writer_jnicallback.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/trace_writer_jnicallback.h b/java/rocksjni/trace_writer_jnicallback.h similarity index 100% 
rename from librocksdb-sys/rocksdb/java/rocksjni/trace_writer_jnicallback.h rename to java/rocksjni/trace_writer_jnicallback.h diff --git a/librocksdb-sys/rocksdb/java/rocksjni/transaction.cc b/java/rocksjni/transaction.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/transaction.cc rename to java/rocksjni/transaction.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/transaction_db.cc b/java/rocksjni/transaction_db.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/transaction_db.cc rename to java/rocksjni/transaction_db.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/transaction_db_options.cc b/java/rocksjni/transaction_db_options.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/transaction_db_options.cc rename to java/rocksjni/transaction_db_options.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/transaction_log.cc b/java/rocksjni/transaction_log.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/transaction_log.cc rename to java/rocksjni/transaction_log.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/transaction_notifier.cc b/java/rocksjni/transaction_notifier.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/transaction_notifier.cc rename to java/rocksjni/transaction_notifier.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/transaction_notifier_jnicallback.cc b/java/rocksjni/transaction_notifier_jnicallback.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/transaction_notifier_jnicallback.cc rename to java/rocksjni/transaction_notifier_jnicallback.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/transaction_notifier_jnicallback.h b/java/rocksjni/transaction_notifier_jnicallback.h similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/transaction_notifier_jnicallback.h rename to java/rocksjni/transaction_notifier_jnicallback.h diff --git a/librocksdb-sys/rocksdb/java/rocksjni/transaction_options.cc b/java/rocksjni/transaction_options.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/transaction_options.cc rename to java/rocksjni/transaction_options.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/ttl.cc b/java/rocksjni/ttl.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/ttl.cc rename to java/rocksjni/ttl.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/wal_filter.cc b/java/rocksjni/wal_filter.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/wal_filter.cc rename to java/rocksjni/wal_filter.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/wal_filter_jnicallback.cc b/java/rocksjni/wal_filter_jnicallback.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/wal_filter_jnicallback.cc rename to java/rocksjni/wal_filter_jnicallback.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/wal_filter_jnicallback.h b/java/rocksjni/wal_filter_jnicallback.h similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/wal_filter_jnicallback.h rename to java/rocksjni/wal_filter_jnicallback.h diff --git a/librocksdb-sys/rocksdb/java/rocksjni/write_batch.cc b/java/rocksjni/write_batch.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/write_batch.cc rename to java/rocksjni/write_batch.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/write_batch_test.cc b/java/rocksjni/write_batch_test.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/write_batch_test.cc 
rename to java/rocksjni/write_batch_test.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/write_batch_with_index.cc b/java/rocksjni/write_batch_with_index.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/write_batch_with_index.cc rename to java/rocksjni/write_batch_with_index.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/write_buffer_manager.cc b/java/rocksjni/write_buffer_manager.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/write_buffer_manager.cc rename to java/rocksjni/write_buffer_manager.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/writebatchhandlerjnicallback.cc b/java/rocksjni/writebatchhandlerjnicallback.cc similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/writebatchhandlerjnicallback.cc rename to java/rocksjni/writebatchhandlerjnicallback.cc diff --git a/librocksdb-sys/rocksdb/java/rocksjni/writebatchhandlerjnicallback.h b/java/rocksjni/writebatchhandlerjnicallback.h similarity index 100% rename from librocksdb-sys/rocksdb/java/rocksjni/writebatchhandlerjnicallback.h rename to java/rocksjni/writebatchhandlerjnicallback.h diff --git a/librocksdb-sys/rocksdb/java/samples/src/main/java/OptimisticTransactionSample.java b/java/samples/src/main/java/OptimisticTransactionSample.java similarity index 100% rename from librocksdb-sys/rocksdb/java/samples/src/main/java/OptimisticTransactionSample.java rename to java/samples/src/main/java/OptimisticTransactionSample.java diff --git a/librocksdb-sys/rocksdb/java/samples/src/main/java/RocksDBColumnFamilySample.java b/java/samples/src/main/java/RocksDBColumnFamilySample.java similarity index 100% rename from librocksdb-sys/rocksdb/java/samples/src/main/java/RocksDBColumnFamilySample.java rename to java/samples/src/main/java/RocksDBColumnFamilySample.java diff --git a/librocksdb-sys/rocksdb/java/samples/src/main/java/RocksDBSample.java b/java/samples/src/main/java/RocksDBSample.java similarity index 100% rename from librocksdb-sys/rocksdb/java/samples/src/main/java/RocksDBSample.java rename to java/samples/src/main/java/RocksDBSample.java diff --git a/librocksdb-sys/rocksdb/java/samples/src/main/java/TransactionSample.java b/java/samples/src/main/java/TransactionSample.java similarity index 100% rename from librocksdb-sys/rocksdb/java/samples/src/main/java/TransactionSample.java rename to java/samples/src/main/java/TransactionSample.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java rename to java/src/main/java/org/rocksdb/AbstractCompactionFilter.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java rename to java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractComparator.java b/java/src/main/java/org/rocksdb/AbstractComparator.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractComparator.java rename to java/src/main/java/org/rocksdb/AbstractComparator.java diff --git 
a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java b/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java rename to java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractEventListener.java b/java/src/main/java/org/rocksdb/AbstractEventListener.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractEventListener.java rename to java/src/main/java/org/rocksdb/AbstractEventListener.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java b/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java rename to java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractMutableOptions.java b/java/src/main/java/org/rocksdb/AbstractMutableOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractMutableOptions.java rename to java/src/main/java/org/rocksdb/AbstractMutableOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractNativeReference.java b/java/src/main/java/org/rocksdb/AbstractNativeReference.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractNativeReference.java rename to java/src/main/java/org/rocksdb/AbstractNativeReference.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractRocksIterator.java b/java/src/main/java/org/rocksdb/AbstractRocksIterator.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractRocksIterator.java rename to java/src/main/java/org/rocksdb/AbstractRocksIterator.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractSlice.java b/java/src/main/java/org/rocksdb/AbstractSlice.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractSlice.java rename to java/src/main/java/org/rocksdb/AbstractSlice.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractTableFilter.java b/java/src/main/java/org/rocksdb/AbstractTableFilter.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractTableFilter.java rename to java/src/main/java/org/rocksdb/AbstractTableFilter.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractTraceWriter.java b/java/src/main/java/org/rocksdb/AbstractTraceWriter.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractTraceWriter.java rename to java/src/main/java/org/rocksdb/AbstractTraceWriter.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java b/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java rename to java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractWalFilter.java 
b/java/src/main/java/org/rocksdb/AbstractWalFilter.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractWalFilter.java rename to java/src/main/java/org/rocksdb/AbstractWalFilter.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractWriteBatch.java b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AbstractWriteBatch.java rename to java/src/main/java/org/rocksdb/AbstractWriteBatch.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AccessHint.java b/java/src/main/java/org/rocksdb/AccessHint.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AccessHint.java rename to java/src/main/java/org/rocksdb/AccessHint.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java rename to java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java rename to java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/BackgroundErrorReason.java b/java/src/main/java/org/rocksdb/BackgroundErrorReason.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/BackgroundErrorReason.java rename to java/src/main/java/org/rocksdb/BackgroundErrorReason.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/BackupEngine.java b/java/src/main/java/org/rocksdb/BackupEngine.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/BackupEngine.java rename to java/src/main/java/org/rocksdb/BackupEngine.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/BackupEngineOptions.java b/java/src/main/java/org/rocksdb/BackupEngineOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/BackupEngineOptions.java rename to java/src/main/java/org/rocksdb/BackupEngineOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/BackupInfo.java b/java/src/main/java/org/rocksdb/BackupInfo.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/BackupInfo.java rename to java/src/main/java/org/rocksdb/BackupInfo.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java rename to java/src/main/java/org/rocksdb/BlockBasedTableConfig.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/BloomFilter.java b/java/src/main/java/org/rocksdb/BloomFilter.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/BloomFilter.java rename to 
java/src/main/java/org/rocksdb/BloomFilter.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/BuiltinComparator.java b/java/src/main/java/org/rocksdb/BuiltinComparator.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/BuiltinComparator.java rename to java/src/main/java/org/rocksdb/BuiltinComparator.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java b/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java rename to java/src/main/java/org/rocksdb/ByteBufferGetStatus.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Cache.java b/java/src/main/java/org/rocksdb/Cache.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Cache.java rename to java/src/main/java/org/rocksdb/Cache.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java rename to java/src/main/java/org/rocksdb/CassandraCompactionFilter.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java b/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java rename to java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Checkpoint.java b/java/src/main/java/org/rocksdb/Checkpoint.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Checkpoint.java rename to java/src/main/java/org/rocksdb/Checkpoint.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ChecksumType.java b/java/src/main/java/org/rocksdb/ChecksumType.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ChecksumType.java rename to java/src/main/java/org/rocksdb/ChecksumType.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ClockCache.java b/java/src/main/java/org/rocksdb/ClockCache.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ClockCache.java rename to java/src/main/java/org/rocksdb/ClockCache.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java b/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java rename to java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java rename to java/src/main/java/org/rocksdb/ColumnFamilyHandle.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyMetaData.java b/java/src/main/java/org/rocksdb/ColumnFamilyMetaData.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyMetaData.java rename to 
java/src/main/java/org/rocksdb/ColumnFamilyMetaData.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java rename to java/src/main/java/org/rocksdb/ColumnFamilyOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java rename to java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactRangeOptions.java b/java/src/main/java/org/rocksdb/CompactRangeOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactRangeOptions.java rename to java/src/main/java/org/rocksdb/CompactRangeOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactionJobInfo.java b/java/src/main/java/org/rocksdb/CompactionJobInfo.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactionJobInfo.java rename to java/src/main/java/org/rocksdb/CompactionJobInfo.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactionJobStats.java b/java/src/main/java/org/rocksdb/CompactionJobStats.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactionJobStats.java rename to java/src/main/java/org/rocksdb/CompactionJobStats.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactionOptions.java b/java/src/main/java/org/rocksdb/CompactionOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactionOptions.java rename to java/src/main/java/org/rocksdb/CompactionOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java rename to java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java rename to java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactionPriority.java b/java/src/main/java/org/rocksdb/CompactionPriority.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactionPriority.java rename to java/src/main/java/org/rocksdb/CompactionPriority.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactionReason.java b/java/src/main/java/org/rocksdb/CompactionReason.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactionReason.java rename to java/src/main/java/org/rocksdb/CompactionReason.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactionStopStyle.java b/java/src/main/java/org/rocksdb/CompactionStopStyle.java similarity index 100% 
rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactionStopStyle.java rename to java/src/main/java/org/rocksdb/CompactionStopStyle.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactionStyle.java b/java/src/main/java/org/rocksdb/CompactionStyle.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompactionStyle.java rename to java/src/main/java/org/rocksdb/CompactionStyle.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ComparatorOptions.java b/java/src/main/java/org/rocksdb/ComparatorOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ComparatorOptions.java rename to java/src/main/java/org/rocksdb/ComparatorOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ComparatorType.java b/java/src/main/java/org/rocksdb/ComparatorType.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ComparatorType.java rename to java/src/main/java/org/rocksdb/ComparatorType.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompressionOptions.java b/java/src/main/java/org/rocksdb/CompressionOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompressionOptions.java rename to java/src/main/java/org/rocksdb/CompressionOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompressionType.java b/java/src/main/java/org/rocksdb/CompressionType.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/CompressionType.java rename to java/src/main/java/org/rocksdb/CompressionType.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java b/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java rename to java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java b/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java rename to java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ConfigOptions.java b/java/src/main/java/org/rocksdb/ConfigOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ConfigOptions.java rename to java/src/main/java/org/rocksdb/ConfigOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/DBOptions.java b/java/src/main/java/org/rocksdb/DBOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/DBOptions.java rename to java/src/main/java/org/rocksdb/DBOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/DBOptionsInterface.java b/java/src/main/java/org/rocksdb/DBOptionsInterface.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/DBOptionsInterface.java rename to java/src/main/java/org/rocksdb/DBOptionsInterface.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/DataBlockIndexType.java b/java/src/main/java/org/rocksdb/DataBlockIndexType.java similarity index 100% rename from 
librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/DataBlockIndexType.java rename to java/src/main/java/org/rocksdb/DataBlockIndexType.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/DbPath.java b/java/src/main/java/org/rocksdb/DbPath.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/DbPath.java rename to java/src/main/java/org/rocksdb/DbPath.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/DirectSlice.java b/java/src/main/java/org/rocksdb/DirectSlice.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/DirectSlice.java rename to java/src/main/java/org/rocksdb/DirectSlice.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/EncodingType.java b/java/src/main/java/org/rocksdb/EncodingType.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/EncodingType.java rename to java/src/main/java/org/rocksdb/EncodingType.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Env.java b/java/src/main/java/org/rocksdb/Env.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Env.java rename to java/src/main/java/org/rocksdb/Env.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/EnvOptions.java b/java/src/main/java/org/rocksdb/EnvOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/EnvOptions.java rename to java/src/main/java/org/rocksdb/EnvOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/EventListener.java b/java/src/main/java/org/rocksdb/EventListener.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/EventListener.java rename to java/src/main/java/org/rocksdb/EventListener.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Experimental.java b/java/src/main/java/org/rocksdb/Experimental.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Experimental.java rename to java/src/main/java/org/rocksdb/Experimental.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java b/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java rename to java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/FileOperationInfo.java b/java/src/main/java/org/rocksdb/FileOperationInfo.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/FileOperationInfo.java rename to java/src/main/java/org/rocksdb/FileOperationInfo.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Filter.java b/java/src/main/java/org/rocksdb/Filter.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Filter.java rename to java/src/main/java/org/rocksdb/Filter.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/FlushJobInfo.java b/java/src/main/java/org/rocksdb/FlushJobInfo.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/FlushJobInfo.java rename to java/src/main/java/org/rocksdb/FlushJobInfo.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/FlushOptions.java 
b/java/src/main/java/org/rocksdb/FlushOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/FlushOptions.java rename to java/src/main/java/org/rocksdb/FlushOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/FlushReason.java b/java/src/main/java/org/rocksdb/FlushReason.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/FlushReason.java rename to java/src/main/java/org/rocksdb/FlushReason.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java rename to java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java rename to java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java b/java/src/main/java/org/rocksdb/HistogramData.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java rename to java/src/main/java/org/rocksdb/HistogramData.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/HistogramType.java b/java/src/main/java/org/rocksdb/HistogramType.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/HistogramType.java rename to java/src/main/java/org/rocksdb/HistogramType.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Holder.java b/java/src/main/java/org/rocksdb/Holder.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Holder.java rename to java/src/main/java/org/rocksdb/Holder.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/IndexShorteningMode.java b/java/src/main/java/org/rocksdb/IndexShorteningMode.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/IndexShorteningMode.java rename to java/src/main/java/org/rocksdb/IndexShorteningMode.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/IndexType.java b/java/src/main/java/org/rocksdb/IndexType.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/IndexType.java rename to java/src/main/java/org/rocksdb/IndexType.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/InfoLogLevel.java b/java/src/main/java/org/rocksdb/InfoLogLevel.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/InfoLogLevel.java rename to java/src/main/java/org/rocksdb/InfoLogLevel.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java rename to java/src/main/java/org/rocksdb/IngestExternalFileOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/KeyMayExist.java b/java/src/main/java/org/rocksdb/KeyMayExist.java similarity index 100% 
rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/KeyMayExist.java rename to java/src/main/java/org/rocksdb/KeyMayExist.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/LRUCache.java b/java/src/main/java/org/rocksdb/LRUCache.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/LRUCache.java rename to java/src/main/java/org/rocksdb/LRUCache.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/LevelMetaData.java b/java/src/main/java/org/rocksdb/LevelMetaData.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/LevelMetaData.java rename to java/src/main/java/org/rocksdb/LevelMetaData.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/LiveFileMetaData.java b/java/src/main/java/org/rocksdb/LiveFileMetaData.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/LiveFileMetaData.java rename to java/src/main/java/org/rocksdb/LiveFileMetaData.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/LogFile.java b/java/src/main/java/org/rocksdb/LogFile.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/LogFile.java rename to java/src/main/java/org/rocksdb/LogFile.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Logger.java b/java/src/main/java/org/rocksdb/Logger.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Logger.java rename to java/src/main/java/org/rocksdb/Logger.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MemTableConfig.java b/java/src/main/java/org/rocksdb/MemTableConfig.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MemTableConfig.java rename to java/src/main/java/org/rocksdb/MemTableConfig.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MemTableInfo.java b/java/src/main/java/org/rocksdb/MemTableInfo.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MemTableInfo.java rename to java/src/main/java/org/rocksdb/MemTableInfo.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MemoryUsageType.java b/java/src/main/java/org/rocksdb/MemoryUsageType.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MemoryUsageType.java rename to java/src/main/java/org/rocksdb/MemoryUsageType.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MemoryUtil.java b/java/src/main/java/org/rocksdb/MemoryUtil.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MemoryUtil.java rename to java/src/main/java/org/rocksdb/MemoryUtil.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MergeOperator.java b/java/src/main/java/org/rocksdb/MergeOperator.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MergeOperator.java rename to java/src/main/java/org/rocksdb/MergeOperator.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java rename to java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java diff --git 
a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java rename to java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptions.java b/java/src/main/java/org/rocksdb/MutableDBOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptions.java rename to java/src/main/java/org/rocksdb/MutableDBOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java b/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java rename to java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MutableOptionKey.java b/java/src/main/java/org/rocksdb/MutableOptionKey.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MutableOptionKey.java rename to java/src/main/java/org/rocksdb/MutableOptionKey.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MutableOptionValue.java b/java/src/main/java/org/rocksdb/MutableOptionValue.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/MutableOptionValue.java rename to java/src/main/java/org/rocksdb/MutableOptionValue.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/NativeComparatorWrapper.java b/java/src/main/java/org/rocksdb/NativeComparatorWrapper.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/NativeComparatorWrapper.java rename to java/src/main/java/org/rocksdb/NativeComparatorWrapper.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/NativeLibraryLoader.java b/java/src/main/java/org/rocksdb/NativeLibraryLoader.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/NativeLibraryLoader.java rename to java/src/main/java/org/rocksdb/NativeLibraryLoader.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/OperationStage.java b/java/src/main/java/org/rocksdb/OperationStage.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/OperationStage.java rename to java/src/main/java/org/rocksdb/OperationStage.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/OperationType.java b/java/src/main/java/org/rocksdb/OperationType.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/OperationType.java rename to java/src/main/java/org/rocksdb/OperationType.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java b/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java rename to java/src/main/java/org/rocksdb/OptimisticTransactionDB.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java b/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java similarity index 100% rename from 
librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java rename to java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/OptionString.java b/java/src/main/java/org/rocksdb/OptionString.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/OptionString.java rename to java/src/main/java/org/rocksdb/OptionString.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Options.java b/java/src/main/java/org/rocksdb/Options.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Options.java rename to java/src/main/java/org/rocksdb/Options.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/OptionsUtil.java b/java/src/main/java/org/rocksdb/OptionsUtil.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/OptionsUtil.java rename to java/src/main/java/org/rocksdb/OptionsUtil.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/PersistentCache.java b/java/src/main/java/org/rocksdb/PersistentCache.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/PersistentCache.java rename to java/src/main/java/org/rocksdb/PersistentCache.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/PlainTableConfig.java b/java/src/main/java/org/rocksdb/PlainTableConfig.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/PlainTableConfig.java rename to java/src/main/java/org/rocksdb/PlainTableConfig.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java b/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java rename to java/src/main/java/org/rocksdb/PrepopulateBlobCache.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Priority.java b/java/src/main/java/org/rocksdb/Priority.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Priority.java rename to java/src/main/java/org/rocksdb/Priority.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Range.java b/java/src/main/java/org/rocksdb/Range.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Range.java rename to java/src/main/java/org/rocksdb/Range.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RateLimiter.java b/java/src/main/java/org/rocksdb/RateLimiter.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RateLimiter.java rename to java/src/main/java/org/rocksdb/RateLimiter.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RateLimiterMode.java b/java/src/main/java/org/rocksdb/RateLimiterMode.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RateLimiterMode.java rename to java/src/main/java/org/rocksdb/RateLimiterMode.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ReadOptions.java b/java/src/main/java/org/rocksdb/ReadOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ReadOptions.java rename to java/src/main/java/org/rocksdb/ReadOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ReadTier.java 
b/java/src/main/java/org/rocksdb/ReadTier.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ReadTier.java rename to java/src/main/java/org/rocksdb/ReadTier.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java b/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java rename to java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RestoreOptions.java b/java/src/main/java/org/rocksdb/RestoreOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RestoreOptions.java rename to java/src/main/java/org/rocksdb/RestoreOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ReusedSynchronisationType.java b/java/src/main/java/org/rocksdb/ReusedSynchronisationType.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ReusedSynchronisationType.java rename to java/src/main/java/org/rocksdb/ReusedSynchronisationType.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RocksCallbackObject.java b/java/src/main/java/org/rocksdb/RocksCallbackObject.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RocksCallbackObject.java rename to java/src/main/java/org/rocksdb/RocksCallbackObject.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RocksDB.java b/java/src/main/java/org/rocksdb/RocksDB.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RocksDB.java rename to java/src/main/java/org/rocksdb/RocksDB.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RocksDBException.java b/java/src/main/java/org/rocksdb/RocksDBException.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RocksDBException.java rename to java/src/main/java/org/rocksdb/RocksDBException.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RocksEnv.java b/java/src/main/java/org/rocksdb/RocksEnv.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RocksEnv.java rename to java/src/main/java/org/rocksdb/RocksEnv.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RocksIterator.java b/java/src/main/java/org/rocksdb/RocksIterator.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RocksIterator.java rename to java/src/main/java/org/rocksdb/RocksIterator.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RocksIteratorInterface.java b/java/src/main/java/org/rocksdb/RocksIteratorInterface.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RocksIteratorInterface.java rename to java/src/main/java/org/rocksdb/RocksIteratorInterface.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RocksMemEnv.java b/java/src/main/java/org/rocksdb/RocksMemEnv.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RocksMemEnv.java rename to java/src/main/java/org/rocksdb/RocksMemEnv.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RocksMutableObject.java b/java/src/main/java/org/rocksdb/RocksMutableObject.java 
similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RocksMutableObject.java rename to java/src/main/java/org/rocksdb/RocksMutableObject.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RocksObject.java b/java/src/main/java/org/rocksdb/RocksObject.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/RocksObject.java rename to java/src/main/java/org/rocksdb/RocksObject.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SanityLevel.java b/java/src/main/java/org/rocksdb/SanityLevel.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SanityLevel.java rename to java/src/main/java/org/rocksdb/SanityLevel.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SizeApproximationFlag.java b/java/src/main/java/org/rocksdb/SizeApproximationFlag.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SizeApproximationFlag.java rename to java/src/main/java/org/rocksdb/SizeApproximationFlag.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java b/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java rename to java/src/main/java/org/rocksdb/SkipListMemTableConfig.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Slice.java b/java/src/main/java/org/rocksdb/Slice.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Slice.java rename to java/src/main/java/org/rocksdb/Slice.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Snapshot.java b/java/src/main/java/org/rocksdb/Snapshot.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Snapshot.java rename to java/src/main/java/org/rocksdb/Snapshot.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SstFileManager.java b/java/src/main/java/org/rocksdb/SstFileManager.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SstFileManager.java rename to java/src/main/java/org/rocksdb/SstFileManager.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SstFileMetaData.java b/java/src/main/java/org/rocksdb/SstFileMetaData.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SstFileMetaData.java rename to java/src/main/java/org/rocksdb/SstFileMetaData.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SstFileReader.java b/java/src/main/java/org/rocksdb/SstFileReader.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SstFileReader.java rename to java/src/main/java/org/rocksdb/SstFileReader.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SstFileReaderIterator.java b/java/src/main/java/org/rocksdb/SstFileReaderIterator.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SstFileReaderIterator.java rename to java/src/main/java/org/rocksdb/SstFileReaderIterator.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SstFileWriter.java b/java/src/main/java/org/rocksdb/SstFileWriter.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SstFileWriter.java rename to 
java/src/main/java/org/rocksdb/SstFileWriter.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SstPartitionerFactory.java b/java/src/main/java/org/rocksdb/SstPartitionerFactory.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SstPartitionerFactory.java rename to java/src/main/java/org/rocksdb/SstPartitionerFactory.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java b/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java rename to java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/StateType.java b/java/src/main/java/org/rocksdb/StateType.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/StateType.java rename to java/src/main/java/org/rocksdb/StateType.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Statistics.java b/java/src/main/java/org/rocksdb/Statistics.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Statistics.java rename to java/src/main/java/org/rocksdb/Statistics.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollector.java b/java/src/main/java/org/rocksdb/StatisticsCollector.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollector.java rename to java/src/main/java/org/rocksdb/StatisticsCollector.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java b/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java rename to java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/StatsCollectorInput.java b/java/src/main/java/org/rocksdb/StatsCollectorInput.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/StatsCollectorInput.java rename to java/src/main/java/org/rocksdb/StatsCollectorInput.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/StatsLevel.java b/java/src/main/java/org/rocksdb/StatsLevel.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/StatsLevel.java rename to java/src/main/java/org/rocksdb/StatsLevel.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Status.java b/java/src/main/java/org/rocksdb/Status.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Status.java rename to java/src/main/java/org/rocksdb/Status.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/StringAppendOperator.java b/java/src/main/java/org/rocksdb/StringAppendOperator.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/StringAppendOperator.java rename to java/src/main/java/org/rocksdb/StringAppendOperator.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java b/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java similarity index 100% rename from 
librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java rename to java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TableFileCreationInfo.java b/java/src/main/java/org/rocksdb/TableFileCreationInfo.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TableFileCreationInfo.java rename to java/src/main/java/org/rocksdb/TableFileCreationInfo.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TableFileCreationReason.java b/java/src/main/java/org/rocksdb/TableFileCreationReason.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TableFileCreationReason.java rename to java/src/main/java/org/rocksdb/TableFileCreationReason.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java b/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java rename to java/src/main/java/org/rocksdb/TableFileDeletionInfo.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TableFilter.java b/java/src/main/java/org/rocksdb/TableFilter.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TableFilter.java rename to java/src/main/java/org/rocksdb/TableFilter.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TableFormatConfig.java b/java/src/main/java/org/rocksdb/TableFormatConfig.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TableFormatConfig.java rename to java/src/main/java/org/rocksdb/TableFormatConfig.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TableProperties.java b/java/src/main/java/org/rocksdb/TableProperties.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TableProperties.java rename to java/src/main/java/org/rocksdb/TableProperties.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ThreadStatus.java b/java/src/main/java/org/rocksdb/ThreadStatus.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ThreadStatus.java rename to java/src/main/java/org/rocksdb/ThreadStatus.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ThreadType.java b/java/src/main/java/org/rocksdb/ThreadType.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/ThreadType.java rename to java/src/main/java/org/rocksdb/ThreadType.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TickerType.java b/java/src/main/java/org/rocksdb/TickerType.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TickerType.java rename to java/src/main/java/org/rocksdb/TickerType.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TimedEnv.java b/java/src/main/java/org/rocksdb/TimedEnv.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TimedEnv.java rename to java/src/main/java/org/rocksdb/TimedEnv.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TraceOptions.java b/java/src/main/java/org/rocksdb/TraceOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TraceOptions.java rename to 
java/src/main/java/org/rocksdb/TraceOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TraceWriter.java b/java/src/main/java/org/rocksdb/TraceWriter.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TraceWriter.java rename to java/src/main/java/org/rocksdb/TraceWriter.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Transaction.java b/java/src/main/java/org/rocksdb/Transaction.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/Transaction.java rename to java/src/main/java/org/rocksdb/Transaction.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TransactionDB.java b/java/src/main/java/org/rocksdb/TransactionDB.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TransactionDB.java rename to java/src/main/java/org/rocksdb/TransactionDB.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TransactionDBOptions.java b/java/src/main/java/org/rocksdb/TransactionDBOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TransactionDBOptions.java rename to java/src/main/java/org/rocksdb/TransactionDBOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TransactionLogIterator.java b/java/src/main/java/org/rocksdb/TransactionLogIterator.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TransactionLogIterator.java rename to java/src/main/java/org/rocksdb/TransactionLogIterator.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TransactionOptions.java b/java/src/main/java/org/rocksdb/TransactionOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TransactionOptions.java rename to java/src/main/java/org/rocksdb/TransactionOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TransactionalDB.java b/java/src/main/java/org/rocksdb/TransactionalDB.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TransactionalDB.java rename to java/src/main/java/org/rocksdb/TransactionalDB.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TransactionalOptions.java b/java/src/main/java/org/rocksdb/TransactionalOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TransactionalOptions.java rename to java/src/main/java/org/rocksdb/TransactionalOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TtlDB.java b/java/src/main/java/org/rocksdb/TtlDB.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TtlDB.java rename to java/src/main/java/org/rocksdb/TtlDB.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java b/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java rename to java/src/main/java/org/rocksdb/TxnDBWritePolicy.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/UInt64AddOperator.java b/java/src/main/java/org/rocksdb/UInt64AddOperator.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/UInt64AddOperator.java rename to java/src/main/java/org/rocksdb/UInt64AddOperator.java diff --git 
a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/VectorMemTableConfig.java b/java/src/main/java/org/rocksdb/VectorMemTableConfig.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/VectorMemTableConfig.java rename to java/src/main/java/org/rocksdb/VectorMemTableConfig.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WALRecoveryMode.java b/java/src/main/java/org/rocksdb/WALRecoveryMode.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WALRecoveryMode.java rename to java/src/main/java/org/rocksdb/WALRecoveryMode.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WBWIRocksIterator.java b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WBWIRocksIterator.java rename to java/src/main/java/org/rocksdb/WBWIRocksIterator.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WalFileType.java b/java/src/main/java/org/rocksdb/WalFileType.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WalFileType.java rename to java/src/main/java/org/rocksdb/WalFileType.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WalFilter.java b/java/src/main/java/org/rocksdb/WalFilter.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WalFilter.java rename to java/src/main/java/org/rocksdb/WalFilter.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WalProcessingOption.java b/java/src/main/java/org/rocksdb/WalProcessingOption.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WalProcessingOption.java rename to java/src/main/java/org/rocksdb/WalProcessingOption.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WriteBatch.java b/java/src/main/java/org/rocksdb/WriteBatch.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WriteBatch.java rename to java/src/main/java/org/rocksdb/WriteBatch.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WriteBatchInterface.java b/java/src/main/java/org/rocksdb/WriteBatchInterface.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WriteBatchInterface.java rename to java/src/main/java/org/rocksdb/WriteBatchInterface.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java b/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java rename to java/src/main/java/org/rocksdb/WriteBatchWithIndex.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WriteBufferManager.java b/java/src/main/java/org/rocksdb/WriteBufferManager.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WriteBufferManager.java rename to java/src/main/java/org/rocksdb/WriteBufferManager.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WriteOptions.java b/java/src/main/java/org/rocksdb/WriteOptions.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WriteOptions.java rename to java/src/main/java/org/rocksdb/WriteOptions.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WriteStallCondition.java 
b/java/src/main/java/org/rocksdb/WriteStallCondition.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WriteStallCondition.java rename to java/src/main/java/org/rocksdb/WriteStallCondition.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WriteStallInfo.java b/java/src/main/java/org/rocksdb/WriteStallInfo.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/WriteStallInfo.java rename to java/src/main/java/org/rocksdb/WriteStallInfo.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/util/ByteUtil.java b/java/src/main/java/org/rocksdb/util/ByteUtil.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/util/ByteUtil.java rename to java/src/main/java/org/rocksdb/util/ByteUtil.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/util/BytewiseComparator.java b/java/src/main/java/org/rocksdb/util/BytewiseComparator.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/util/BytewiseComparator.java rename to java/src/main/java/org/rocksdb/util/BytewiseComparator.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/util/Environment.java b/java/src/main/java/org/rocksdb/util/Environment.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/util/Environment.java rename to java/src/main/java/org/rocksdb/util/Environment.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/util/IntComparator.java b/java/src/main/java/org/rocksdb/util/IntComparator.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/util/IntComparator.java rename to java/src/main/java/org/rocksdb/util/IntComparator.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java b/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java rename to java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java diff --git a/librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/util/SizeUnit.java b/java/src/main/java/org/rocksdb/util/SizeUnit.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/main/java/org/rocksdb/util/SizeUnit.java rename to java/src/main/java/org/rocksdb/util/SizeUnit.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/AbstractTransactionTest.java b/java/src/test/java/org/rocksdb/AbstractTransactionTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/AbstractTransactionTest.java rename to java/src/test/java/org/rocksdb/AbstractTransactionTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/BackupEngineOptionsTest.java b/java/src/test/java/org/rocksdb/BackupEngineOptionsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/BackupEngineOptionsTest.java rename to java/src/test/java/org/rocksdb/BackupEngineOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/BackupEngineTest.java b/java/src/test/java/org/rocksdb/BackupEngineTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/BackupEngineTest.java rename to java/src/test/java/org/rocksdb/BackupEngineTest.java diff --git 
a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/BlobOptionsTest.java b/java/src/test/java/org/rocksdb/BlobOptionsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/BlobOptionsTest.java rename to java/src/test/java/org/rocksdb/BlobOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java b/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java rename to java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/BuiltinComparatorTest.java b/java/src/test/java/org/rocksdb/BuiltinComparatorTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/BuiltinComparatorTest.java rename to java/src/test/java/org/rocksdb/BuiltinComparatorTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/ByteBufferUnsupportedOperationTest.java b/java/src/test/java/org/rocksdb/ByteBufferUnsupportedOperationTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/ByteBufferUnsupportedOperationTest.java rename to java/src/test/java/org/rocksdb/ByteBufferUnsupportedOperationTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/BytewiseComparatorRegressionTest.java b/java/src/test/java/org/rocksdb/BytewiseComparatorRegressionTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/BytewiseComparatorRegressionTest.java rename to java/src/test/java/org/rocksdb/BytewiseComparatorRegressionTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CheckPointTest.java b/java/src/test/java/org/rocksdb/CheckPointTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CheckPointTest.java rename to java/src/test/java/org/rocksdb/CheckPointTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/ClockCacheTest.java b/java/src/test/java/org/rocksdb/ClockCacheTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/ClockCacheTest.java rename to java/src/test/java/org/rocksdb/ClockCacheTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java b/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java rename to java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyTest.java b/java/src/test/java/org/rocksdb/ColumnFamilyTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyTest.java rename to java/src/test/java/org/rocksdb/ColumnFamilyTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java b/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java rename to java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompactionFilterFactoryTest.java b/java/src/test/java/org/rocksdb/CompactionFilterFactoryTest.java 
similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompactionFilterFactoryTest.java rename to java/src/test/java/org/rocksdb/CompactionFilterFactoryTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompactionJobInfoTest.java b/java/src/test/java/org/rocksdb/CompactionJobInfoTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompactionJobInfoTest.java rename to java/src/test/java/org/rocksdb/CompactionJobInfoTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompactionJobStatsTest.java b/java/src/test/java/org/rocksdb/CompactionJobStatsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompactionJobStatsTest.java rename to java/src/test/java/org/rocksdb/CompactionJobStatsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java b/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java rename to java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsTest.java b/java/src/test/java/org/rocksdb/CompactionOptionsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsTest.java rename to java/src/test/java/org/rocksdb/CompactionOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java b/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java rename to java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompactionPriorityTest.java b/java/src/test/java/org/rocksdb/CompactionPriorityTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompactionPriorityTest.java rename to java/src/test/java/org/rocksdb/CompactionPriorityTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java b/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java rename to java/src/test/java/org/rocksdb/CompactionStopStyleTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java b/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java rename to java/src/test/java/org/rocksdb/ComparatorOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompressionOptionsTest.java b/java/src/test/java/org/rocksdb/CompressionOptionsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompressionOptionsTest.java rename to java/src/test/java/org/rocksdb/CompressionOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompressionTypesTest.java b/java/src/test/java/org/rocksdb/CompressionTypesTest.java similarity index 100% rename from 
librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/CompressionTypesTest.java rename to java/src/test/java/org/rocksdb/CompressionTypesTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/ConcurrentTaskLimiterTest.java b/java/src/test/java/org/rocksdb/ConcurrentTaskLimiterTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/ConcurrentTaskLimiterTest.java rename to java/src/test/java/org/rocksdb/ConcurrentTaskLimiterTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/DBOptionsTest.java b/java/src/test/java/org/rocksdb/DBOptionsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/DBOptionsTest.java rename to java/src/test/java/org/rocksdb/DBOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/DefaultEnvTest.java b/java/src/test/java/org/rocksdb/DefaultEnvTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/DefaultEnvTest.java rename to java/src/test/java/org/rocksdb/DefaultEnvTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/DirectSliceTest.java b/java/src/test/java/org/rocksdb/DirectSliceTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/DirectSliceTest.java rename to java/src/test/java/org/rocksdb/DirectSliceTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/EnvOptionsTest.java b/java/src/test/java/org/rocksdb/EnvOptionsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/EnvOptionsTest.java rename to java/src/test/java/org/rocksdb/EnvOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/EventListenerTest.java b/java/src/test/java/org/rocksdb/EventListenerTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/EventListenerTest.java rename to java/src/test/java/org/rocksdb/EventListenerTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/FilterTest.java b/java/src/test/java/org/rocksdb/FilterTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/FilterTest.java rename to java/src/test/java/org/rocksdb/FilterTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/FlushOptionsTest.java b/java/src/test/java/org/rocksdb/FlushOptionsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/FlushOptionsTest.java rename to java/src/test/java/org/rocksdb/FlushOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/FlushTest.java b/java/src/test/java/org/rocksdb/FlushTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/FlushTest.java rename to java/src/test/java/org/rocksdb/FlushTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/InfoLogLevelTest.java b/java/src/test/java/org/rocksdb/InfoLogLevelTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/InfoLogLevelTest.java rename to java/src/test/java/org/rocksdb/InfoLogLevelTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java b/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java similarity index 100% rename from 
librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java rename to java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/KeyMayExistTest.java b/java/src/test/java/org/rocksdb/KeyMayExistTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/KeyMayExistTest.java rename to java/src/test/java/org/rocksdb/KeyMayExistTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/LRUCacheTest.java b/java/src/test/java/org/rocksdb/LRUCacheTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/LRUCacheTest.java rename to java/src/test/java/org/rocksdb/LRUCacheTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/LoggerTest.java b/java/src/test/java/org/rocksdb/LoggerTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/LoggerTest.java rename to java/src/test/java/org/rocksdb/LoggerTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MemTableTest.java b/java/src/test/java/org/rocksdb/MemTableTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MemTableTest.java rename to java/src/test/java/org/rocksdb/MemTableTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MemoryUtilTest.java b/java/src/test/java/org/rocksdb/MemoryUtilTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MemoryUtilTest.java rename to java/src/test/java/org/rocksdb/MemoryUtilTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MergeTest.java b/java/src/test/java/org/rocksdb/MergeTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MergeTest.java rename to java/src/test/java/org/rocksdb/MergeTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MixedOptionsTest.java b/java/src/test/java/org/rocksdb/MixedOptionsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MixedOptionsTest.java rename to java/src/test/java/org/rocksdb/MixedOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MultiColumnRegressionTest.java b/java/src/test/java/org/rocksdb/MultiColumnRegressionTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MultiColumnRegressionTest.java rename to java/src/test/java/org/rocksdb/MultiColumnRegressionTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MultiGetManyKeysTest.java b/java/src/test/java/org/rocksdb/MultiGetManyKeysTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MultiGetManyKeysTest.java rename to java/src/test/java/org/rocksdb/MultiGetManyKeysTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MultiGetTest.java b/java/src/test/java/org/rocksdb/MultiGetTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MultiGetTest.java rename to java/src/test/java/org/rocksdb/MultiGetTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java b/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java similarity index 100% rename from 
librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java rename to java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MutableDBOptionsTest.java b/java/src/test/java/org/rocksdb/MutableDBOptionsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MutableDBOptionsTest.java rename to java/src/test/java/org/rocksdb/MutableDBOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MutableOptionsGetSetTest.java b/java/src/test/java/org/rocksdb/MutableOptionsGetSetTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/MutableOptionsGetSetTest.java rename to java/src/test/java/org/rocksdb/MutableOptionsGetSetTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/NativeComparatorWrapperTest.java b/java/src/test/java/org/rocksdb/NativeComparatorWrapperTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/NativeComparatorWrapperTest.java rename to java/src/test/java/org/rocksdb/NativeComparatorWrapperTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java b/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java rename to java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionDBTest.java b/java/src/test/java/org/rocksdb/OptimisticTransactionDBTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionDBTest.java rename to java/src/test/java/org/rocksdb/OptimisticTransactionDBTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionOptionsTest.java b/java/src/test/java/org/rocksdb/OptimisticTransactionOptionsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionOptionsTest.java rename to java/src/test/java/org/rocksdb/OptimisticTransactionOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionTest.java b/java/src/test/java/org/rocksdb/OptimisticTransactionTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionTest.java rename to java/src/test/java/org/rocksdb/OptimisticTransactionTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/OptionsTest.java b/java/src/test/java/org/rocksdb/OptionsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/OptionsTest.java rename to java/src/test/java/org/rocksdb/OptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/OptionsUtilTest.java b/java/src/test/java/org/rocksdb/OptionsUtilTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/OptionsUtilTest.java rename to java/src/test/java/org/rocksdb/OptionsUtilTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/PlainTableConfigTest.java b/java/src/test/java/org/rocksdb/PlainTableConfigTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/PlainTableConfigTest.java rename to 
java/src/test/java/org/rocksdb/PlainTableConfigTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/PlatformRandomHelper.java b/java/src/test/java/org/rocksdb/PlatformRandomHelper.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/PlatformRandomHelper.java rename to java/src/test/java/org/rocksdb/PlatformRandomHelper.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/PutMultiplePartsTest.java b/java/src/test/java/org/rocksdb/PutMultiplePartsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/PutMultiplePartsTest.java rename to java/src/test/java/org/rocksdb/PutMultiplePartsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/RateLimiterTest.java b/java/src/test/java/org/rocksdb/RateLimiterTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/RateLimiterTest.java rename to java/src/test/java/org/rocksdb/RateLimiterTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/ReadOnlyTest.java b/java/src/test/java/org/rocksdb/ReadOnlyTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/ReadOnlyTest.java rename to java/src/test/java/org/rocksdb/ReadOnlyTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/ReadOptionsTest.java b/java/src/test/java/org/rocksdb/ReadOptionsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/ReadOptionsTest.java rename to java/src/test/java/org/rocksdb/ReadOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/RocksDBExceptionTest.java b/java/src/test/java/org/rocksdb/RocksDBExceptionTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/RocksDBExceptionTest.java rename to java/src/test/java/org/rocksdb/RocksDBExceptionTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/RocksDBTest.java b/java/src/test/java/org/rocksdb/RocksDBTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/RocksDBTest.java rename to java/src/test/java/org/rocksdb/RocksDBTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/RocksIteratorTest.java b/java/src/test/java/org/rocksdb/RocksIteratorTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/RocksIteratorTest.java rename to java/src/test/java/org/rocksdb/RocksIteratorTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/RocksMemEnvTest.java b/java/src/test/java/org/rocksdb/RocksMemEnvTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/RocksMemEnvTest.java rename to java/src/test/java/org/rocksdb/RocksMemEnvTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/RocksNativeLibraryResource.java b/java/src/test/java/org/rocksdb/RocksNativeLibraryResource.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/RocksNativeLibraryResource.java rename to java/src/test/java/org/rocksdb/RocksNativeLibraryResource.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/SecondaryDBTest.java b/java/src/test/java/org/rocksdb/SecondaryDBTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/SecondaryDBTest.java rename to 
java/src/test/java/org/rocksdb/SecondaryDBTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/SliceTest.java b/java/src/test/java/org/rocksdb/SliceTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/SliceTest.java rename to java/src/test/java/org/rocksdb/SliceTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/SnapshotTest.java b/java/src/test/java/org/rocksdb/SnapshotTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/SnapshotTest.java rename to java/src/test/java/org/rocksdb/SnapshotTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/SstFileManagerTest.java b/java/src/test/java/org/rocksdb/SstFileManagerTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/SstFileManagerTest.java rename to java/src/test/java/org/rocksdb/SstFileManagerTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/SstFileReaderTest.java b/java/src/test/java/org/rocksdb/SstFileReaderTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/SstFileReaderTest.java rename to java/src/test/java/org/rocksdb/SstFileReaderTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/SstFileWriterTest.java b/java/src/test/java/org/rocksdb/SstFileWriterTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/SstFileWriterTest.java rename to java/src/test/java/org/rocksdb/SstFileWriterTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/SstPartitionerTest.java b/java/src/test/java/org/rocksdb/SstPartitionerTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/SstPartitionerTest.java rename to java/src/test/java/org/rocksdb/SstPartitionerTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java b/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java rename to java/src/test/java/org/rocksdb/StatisticsCollectorTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/StatisticsTest.java b/java/src/test/java/org/rocksdb/StatisticsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/StatisticsTest.java rename to java/src/test/java/org/rocksdb/StatisticsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/StatsCallbackMock.java b/java/src/test/java/org/rocksdb/StatsCallbackMock.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/StatsCallbackMock.java rename to java/src/test/java/org/rocksdb/StatsCallbackMock.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/TableFilterTest.java b/java/src/test/java/org/rocksdb/TableFilterTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/TableFilterTest.java rename to java/src/test/java/org/rocksdb/TableFilterTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/TimedEnvTest.java b/java/src/test/java/org/rocksdb/TimedEnvTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/TimedEnvTest.java rename to java/src/test/java/org/rocksdb/TimedEnvTest.java diff --git 
a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/TransactionDBOptionsTest.java b/java/src/test/java/org/rocksdb/TransactionDBOptionsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/TransactionDBOptionsTest.java rename to java/src/test/java/org/rocksdb/TransactionDBOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/TransactionDBTest.java b/java/src/test/java/org/rocksdb/TransactionDBTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/TransactionDBTest.java rename to java/src/test/java/org/rocksdb/TransactionDBTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java b/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java rename to java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/TransactionOptionsTest.java b/java/src/test/java/org/rocksdb/TransactionOptionsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/TransactionOptionsTest.java rename to java/src/test/java/org/rocksdb/TransactionOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/TransactionTest.java b/java/src/test/java/org/rocksdb/TransactionTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/TransactionTest.java rename to java/src/test/java/org/rocksdb/TransactionTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/TtlDBTest.java b/java/src/test/java/org/rocksdb/TtlDBTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/TtlDBTest.java rename to java/src/test/java/org/rocksdb/TtlDBTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/Types.java b/java/src/test/java/org/rocksdb/Types.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/Types.java rename to java/src/test/java/org/rocksdb/Types.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/VerifyChecksumsTest.java b/java/src/test/java/org/rocksdb/VerifyChecksumsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/VerifyChecksumsTest.java rename to java/src/test/java/org/rocksdb/VerifyChecksumsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java b/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java rename to java/src/test/java/org/rocksdb/WALRecoveryModeTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/WalFilterTest.java b/java/src/test/java/org/rocksdb/WalFilterTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/WalFilterTest.java rename to java/src/test/java/org/rocksdb/WalFilterTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java b/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java rename to java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java diff --git 
a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/WriteBatchTest.java b/java/src/test/java/org/rocksdb/WriteBatchTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/WriteBatchTest.java rename to java/src/test/java/org/rocksdb/WriteBatchTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java b/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java rename to java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java b/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java rename to java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/WriteOptionsTest.java b/java/src/test/java/org/rocksdb/WriteOptionsTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/WriteOptionsTest.java rename to java/src/test/java/org/rocksdb/WriteOptionsTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/test/RemoveEmptyValueCompactionFilterFactory.java b/java/src/test/java/org/rocksdb/test/RemoveEmptyValueCompactionFilterFactory.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/test/RemoveEmptyValueCompactionFilterFactory.java rename to java/src/test/java/org/rocksdb/test/RemoveEmptyValueCompactionFilterFactory.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java b/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java rename to java/src/test/java/org/rocksdb/test/RocksJunitRunner.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/test/TestableEventListener.java b/java/src/test/java/org/rocksdb/test/TestableEventListener.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/test/TestableEventListener.java rename to java/src/test/java/org/rocksdb/test/TestableEventListener.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java b/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java rename to java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorIntTest.java b/java/src/test/java/org/rocksdb/util/BytewiseComparatorIntTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorIntTest.java rename to java/src/test/java/org/rocksdb/util/BytewiseComparatorIntTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java b/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java rename to java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java diff --git 
a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java b/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java rename to java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/DirectByteBufferAllocator.java b/java/src/test/java/org/rocksdb/util/DirectByteBufferAllocator.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/DirectByteBufferAllocator.java rename to java/src/test/java/org/rocksdb/util/DirectByteBufferAllocator.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/EnvironmentTest.java b/java/src/test/java/org/rocksdb/util/EnvironmentTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/EnvironmentTest.java rename to java/src/test/java/org/rocksdb/util/EnvironmentTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/HeapByteBufferAllocator.java b/java/src/test/java/org/rocksdb/util/HeapByteBufferAllocator.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/HeapByteBufferAllocator.java rename to java/src/test/java/org/rocksdb/util/HeapByteBufferAllocator.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/IntComparatorTest.java b/java/src/test/java/org/rocksdb/util/IntComparatorTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/IntComparatorTest.java rename to java/src/test/java/org/rocksdb/util/IntComparatorTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/JNIComparatorTest.java b/java/src/test/java/org/rocksdb/util/JNIComparatorTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/JNIComparatorTest.java rename to java/src/test/java/org/rocksdb/util/JNIComparatorTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/ReverseBytewiseComparatorIntTest.java b/java/src/test/java/org/rocksdb/util/ReverseBytewiseComparatorIntTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/ReverseBytewiseComparatorIntTest.java rename to java/src/test/java/org/rocksdb/util/ReverseBytewiseComparatorIntTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/SizeUnitTest.java b/java/src/test/java/org/rocksdb/util/SizeUnitTest.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/SizeUnitTest.java rename to java/src/test/java/org/rocksdb/util/SizeUnitTest.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/TestUtil.java b/java/src/test/java/org/rocksdb/util/TestUtil.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/TestUtil.java rename to java/src/test/java/org/rocksdb/util/TestUtil.java diff --git a/librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/WriteBatchGetter.java b/java/src/test/java/org/rocksdb/util/WriteBatchGetter.java similarity index 100% rename from librocksdb-sys/rocksdb/java/src/test/java/org/rocksdb/util/WriteBatchGetter.java rename to java/src/test/java/org/rocksdb/util/WriteBatchGetter.java diff --git a/librocksdb-sys/rocksdb/java/understanding_options.md 
b/java/understanding_options.md similarity index 100% rename from librocksdb-sys/rocksdb/java/understanding_options.md rename to java/understanding_options.md diff --git a/librocksdb-sys/Cargo.toml b/librocksdb-sys/Cargo.toml deleted file mode 100644 index f343385..0000000 --- a/librocksdb-sys/Cargo.toml +++ /dev/null @@ -1,47 +0,0 @@ -[package] -name = "librocksdb-sys" -version = "0.11.0+8.3.2" -edition = "2018" -rust-version = "1.60" -authors = ["Karl Hobley ", "Arkadiy Paronyan "] -license = "MIT/Apache-2.0/BSD-3-Clause" -description = "Native bindings to librocksdb" -readme = "README.md" -repository = "https://git.nextgraph.org/NextGraph/rust-rocksdb" -keywords = [ "bindings", "ffi", "rocksdb" ] -categories = [ "api-bindings", "database", "external-ffi-bindings" ] -links = "rocksdb" - -[features] -default = [ "static" ] -jemalloc = ["tikv-jemalloc-sys"] -static = ["libz-sys?/static", "bzip2-sys?/static"] -io-uring = [] -snappy = [] -lz4 = ["lz4-sys"] -zstd = ["zstd-sys"] -zlib = ["libz-sys"] -bzip2 = ["bzip2-sys"] -rtti = [] - -[dependencies] -libc = "0.2" -tikv-jemalloc-sys = { version = "0.5", features = ["unprefixed_malloc_on_supported_platforms"], optional = true } -lz4-sys = { version = "1.9", optional = true } -zstd-sys = { version = "2.0", features = ["zdict_builder"], optional = true } -libz-sys = { version = "1.1", default-features = false, optional = true } -bzip2-sys = { version = "0.1", default-features = false, optional = true } - -[target.'cfg(not(any(target_os = "linux",target_os = "darwin",target_os = "openbsd")))'.dependencies.openssl] -version = "0.10" -features = ["vendored"] - -[dev-dependencies] -const-cstr = "0.3" -uuid = { version = "1.0", features = ["v4"] } - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -bindgen = { version = "0.65", default-features = false, features = ["runtime"] } -glob = "0.3" -pkg-config = { version = "0.3" } diff --git a/librocksdb-sys/Makefile b/librocksdb-sys/Makefile deleted file mode 100644 index c76fbd6..0000000 --- a/librocksdb-sys/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -include rocksdb/src.mk - -rocksdb_lib_sources.txt: rocksdb/src.mk - @echo -n "${LIB_SOURCES}" | tr ' ' '\n' > rocksdb_lib_sources.txt - -gen_lib_sources: rocksdb_lib_sources.txt diff --git a/librocksdb-sys/README.md b/librocksdb-sys/README.md deleted file mode 100644 index 65330a3..0000000 --- a/librocksdb-sys/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# RocksDB bindings - -Low-level bindings to [RocksDB's](https://github.com/facebook/rocksdb) C API. - -Based on original work by Tyler Neely -https://github.com/rust-rocksdb/rust-rocksdb -and Jeremy Fitzhardinge -https://github.com/jsgf/rocksdb-sys - -### Version - -The librocksdb-sys version number is in the format `X.Y.Z+RX.RY.RZ`, where -`X.Y.Z` is the version of this crate and follows SemVer conventions, while -`RX.RY.RZ` is the version of the bundled rocksdb. diff --git a/librocksdb-sys/api/.clang-format b/librocksdb-sys/api/.clang-format deleted file mode 100644 index 2296f7d..0000000 --- a/librocksdb-sys/api/.clang-format +++ /dev/null @@ -1,3 +0,0 @@ ---- -BasedOnStyle: Google -... 
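Aside on the versioning convention documented in the deleted librocksdb-sys README above: the `X.Y.Z+RX.RY.RZ` scheme packs the crate's own SemVer version and the version of the bundled RocksDB into a single string (the deleted Cargo.toml above used `0.11.0+8.3.2`). The following minimal Rust sketch, illustrative only and not part of the crate, shows how such a string splits into its two halves.

fn split_sys_version(v: &str) -> (&str, Option<&str>) {
    // Everything before '+' is the crate's SemVer version; everything after
    // it is the version of the bundled RocksDB, when present.
    match v.split_once('+') {
        Some((crate_ver, rocksdb_ver)) => (crate_ver, Some(rocksdb_ver)),
        None => (v, None),
    }
}

fn main() {
    // "0.11.0+8.3.2" is the version string from the deleted Cargo.toml above.
    let (crate_ver, rocksdb_ver) = split_sys_version("0.11.0+8.3.2");
    assert_eq!(crate_ver, "0.11.0");
    assert_eq!(rocksdb_ver, Some("8.3.2"));
}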
diff --git a/librocksdb-sys/api/c.cc b/librocksdb-sys/api/c.cc deleted file mode 100644 index 9ffe710..0000000 --- a/librocksdb-sys/api/c.cc +++ /dev/null @@ -1,19 +0,0 @@ -#include "../rocksdb/include/rocksdb/version.h" -#include -#include "c.h" - -static char* CopyString(const std::string& str) { - char* result = reinterpret_cast(malloc(sizeof(char) * str.size()+1)); - memcpy(result, str.data(), sizeof(char) * str.size()); - result[sizeof(char) * str.size()] = 0; - return result; -} - -extern "C" { - -char* rocksdb_version() { - auto name = ROCKSDB_NAMESPACE::GetRocksVersionAsString(true); - return CopyString(name); -} - -} diff --git a/librocksdb-sys/api/c.h b/librocksdb-sys/api/c.h deleted file mode 100644 index a40e1fc..0000000 --- a/librocksdb-sys/api/c.h +++ /dev/null @@ -1,11 +0,0 @@ -#pragma once - -#ifdef __cplusplus -extern "C" { -#endif - -char* rocksdb_version(); - -#ifdef __cplusplus -} -#endif diff --git a/librocksdb-sys/build.rs b/librocksdb-sys/build.rs deleted file mode 100644 index 5e00dff..0000000 --- a/librocksdb-sys/build.rs +++ /dev/null @@ -1,491 +0,0 @@ -use std::path::Path; -use std::{env, fs, path::PathBuf, process::Command}; - -fn link(name: &str, bundled: bool) { - use std::env::var; - let target = var("TARGET").unwrap(); - let target: Vec<_> = target.split('-').collect(); - if target.get(2) == Some(&"windows") { - println!("cargo:rustc-link-lib=dylib={name}"); - if bundled && target.get(3) == Some(&"gnu") { - let dir = var("CARGO_MANIFEST_DIR").unwrap(); - println!("cargo:rustc-link-search=native={}/{}", dir, target[0]); - } - } -} - -fn fail_on_empty_directory(name: &str) { - if fs::read_dir(name).unwrap().count() == 0 { - println!("The `{name}` directory is empty, did you forget to pull the submodules?"); - println!("Try `git submodule update --init --recursive`"); - panic!(); - } -} - -fn rocksdb_include_dir() -> String { - match env::var("ROCKSDB_INCLUDE_DIR") { - Ok(val) => val, - Err(_) => "rocksdb/include".to_string(), - } -} - -fn bindgen_rocksdb() { - let bindings = bindgen::Builder::default() - .header(rocksdb_include_dir() + "/rocksdb/c.h") - .header("api/c.h") - .derive_debug(false) - .blocklist_type("max_align_t") // https://github.com/rust-lang-nursery/rust-bindgen/issues/550 - .ctypes_prefix("libc") - .size_t_is_usize(true) - .generate() - .expect("unable to generate rocksdb bindings"); - - let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); - bindings - .write_to_file(out_path.join("bindings.rs")) - .expect("unable to write rocksdb bindings"); -} - -fn build_rocksdb() { - let target = env::var("TARGET").unwrap(); - - let mut config = cc::Build::new(); - config.include("rocksdb/include/"); - config.include("rocksdb/"); - config.include("rocksdb/third-party/gtest-1.8.1/fused-src/"); - - if cfg!(feature = "snappy") { - config.define("SNAPPY", Some("1")); - if let Some(path) = env::var_os("DEP_SNAPPY_INCLUDE") { - config.include(path); - } - } - - if cfg!(feature = "lz4") { - config.define("LZ4", Some("1")); - config.include("lz4/"); - } - - if cfg!(feature = "zstd") { - config.define("ZSTD", Some("1")); - if let Some(path) = env::var_os("DEP_ZSTD_INCLUDE") { - config.include(path); - } - } - - if cfg!(feature = "zlib") { - config.define("ZLIB", Some("1")); - if let Some(path) = env::var_os("DEP_Z_INCLUDE") { - config.include(path); - } - } - - if cfg!(feature = "bzip2") { - config.define("BZIP2", Some("1")); - if let Some(path) = env::var_os("DEP_BZIP2_INCLUDE") { - config.include(path); - } - } - - if cfg!(feature = "rtti") { - 
config.define("USE_RTTI", Some("1")); - } - - config.include("."); - config.define("NDEBUG", Some("1")); - - let mut lib_sources = include_str!("rocksdb_lib_sources.txt") - .trim() - .split('\n') - .map(str::trim) - // We have a pre-generated a version of build_version.cc in the local directory - .filter(|file| !matches!(*file, "util/build_version.cc")) - .collect::>(); - - if let (true, Ok(_target_feature_value)) = ( - target.contains("x86_64"), - env::var("CARGO_CFG_TARGET_FEATURE"), - ) { - // This is needed to enable hardware CRC32C. Technically, SSE 4.2 is - // only available since Intel Nehalem (about 2010) and AMD Bulldozer - // (about 2011). - // let target_features: Vec<_> = target_feature_value.split(',').collect(); - - // if target_features.contains(&"sse2") { - // config.flag_if_supported("-msse2"); - // } - // if target_features.contains(&"sse4.1") { - // config.flag_if_supported("-msse4.1"); - // } - // if target_features.contains(&"sse4.2") { - // config.flag_if_supported("-msse4.2"); - // } - // // Pass along additional target features as defined in - // // build_tools/build_detect_platform. - // if target_features.contains(&"avx2") { - // config.flag_if_supported("-mavx2"); - // } - // if target_features.contains(&"bmi1") { - // config.flag_if_supported("-mbmi"); - // } - // if target_features.contains(&"lzcnt") { - // config.flag_if_supported("-mlzcnt"); - // } - // if !target.contains("android") && target_features.contains(&"pclmulqdq") { - // config.flag_if_supported("-mpclmul"); - // } - - // We want a portable library that can run on any x86_64. - // but we optimize for haswell which supports - // many or most of the available optimizations while still being compatible with - // most processors made since roughly 2013. - // if this becomes a problem for some app installers with older hardware, a special install - // file should be generated with a lib compiled without this flag - // config.flag("-march=haswell"); - // the flag has been moved to the darwin. 
openbsd, freebsd and linux cases below - } - - if target.contains("darwin") || (target.contains("linux") && !target.contains("android")) { - // on macos and linux we use the IPPCP plugin of rocksdb for the crypto (the lib is precompiled) - config.include("rocksdb/plugin/ippcp/library/include"); - lib_sources.push("plugin/ippcp/ippcp_provider.cc"); - let dir = env::var("CARGO_MANIFEST_DIR").unwrap(); - println!( - "cargo:rustc-link-search=native={}", - Path::new(&dir) - .join("rocksdb/plugin/ippcp/library/macos/lib") - .display() - ); - println!("cargo:rustc-link-lib=static=ippcp"); - } else if !target.contains("openbsd") { - if let Some(include) = std::env::var_os("DEP_OPENSSL_INCLUDE") { - config.include(include); - } else { - config.include("rocksdb/plugin/openssl/include"); - } - lib_sources.push("plugin/openssl/openssl_provider.cc"); - // let dir = env::var("CARGO_MANIFEST_DIR").unwrap(); - // println!( - // "cargo:rustc-link-search=dependency={}", - // Path::new(&dir) - // //.join("rocksdb/plugin/ippcp/library/macos/lib") - // .display() - // ); - // println!("cargo:rustc-link-lib=static=crypto"); - } - - if target.contains("apple-ios") { - config.define("OS_MACOSX", None); - config.define("IOS_CROSS_COMPILE", None); - config.define("PLATFORM", "IOS"); - config.define("NIOSTATS_CONTEXT", None); - config.define("NPERF_CONTEXT", None); - config.define("ROCKSDB_PLATFORM_POSIX", None); - config.define("ROCKSDB_LIB_IO_POSIX", None); - env::set_var("IPHONEOS_DEPLOYMENT_TARGET", "12.0"); - } else if target.contains("darwin") { - config.flag("-march=haswell"); - config.define("OS_MACOSX", None); - config.define("ROCKSDB_PLATFORM_POSIX", None); - config.define("ROCKSDB_LIB_IO_POSIX", None); - println!("cargo:rustc-link-arg=-mmacosx-version-min=10.14"); - config.flag("-Wshorten-64-to-32"); - config.flag("-mmacosx-version-min=10.14"); - config.define("DHAVE_FULLFSYNC", None); - config.define("HAVE_UINT128_EXTENSION", None); - config.flag_if_supported("-faligned-new"); - config.define("AVE_ALIGNED_NEW", None); - } else if target.contains("android") { - config.define("OS_ANDROID", None); - config.define("ROCKSDB_PLATFORM_POSIX", None); - config.define("ROCKSDB_LIB_IO_POSIX", None); - config.define("OPENSSL_NO_STDIO", None); - config.define("ANDROID_STL", "c++_shared"); - config.define("_REENTRANT", None); - config.flag("-fno-builtin-memcmp"); - } else if target.contains("linux") { - config.flag("-march=haswell"); - config.define("OS_LINUX", None); - config.define("ROCKSDB_PLATFORM_POSIX", None); - config.define("ROCKSDB_LIB_IO_POSIX", None); - config.define("ROCKSDB_FALLOCATE_PRESENT", None); - config.define("ROCKSDB_MALLOC_USABLE_SIZE", None); - config.define("ROCKSDB_PTHREAD_ADAPTIVE_MUTEX", None); - config.define("ROCKSDB_RANGESYNC_PRESENT", None); - config.define("ROCKSDB_SCHED_GETCPU_PRESENT", None); - config.define("ROCKSDB_AUXV_GETAUXVAL_PRESENT", None); - config.define("HAVE_UINT128_EXTENSION", None); - config.define("HAVE_ALIGNED_NEW", None); - println!("cargo:rustc-link-arg=-lpthread"); - println!("cargo:rustc-link-arg=-lrt"); - println!("cargo:rustc-link-arg=-ldl"); - config.flag("-fno-builtin-memcmp"); - } else if target.contains("freebsd") { - config.flag("-march=haswell"); - config.define("OS_FREEBSD", None); - config.define("ROCKSDB_PLATFORM_POSIX", None); - config.define("ROCKSDB_LIB_IO_POSIX", None); - println!("cargo:rustc-link-arg=-lpthread"); - config.flag("-fno-builtin-memcmp"); - config.define("_REENTRANT", None); - } else if target.contains("openbsd") { - 
//config.flag("-march=haswell"); - config.define("OS_OPENBSD", None); - config.define("ROCKSDB_PLATFORM_POSIX", None); - config.define("ROCKSDB_LIB_IO_POSIX", None); - println!("cargo:rustc-link-arg=-pthread"); - //println!("cargo:rustc-link-lib=execinfo"); - println!("cargo:rustc-link-lib=crypto"); - config.flag("-fno-builtin-memcmp"); - config.flag_if_supported("-faligned-new"); - config.flag("-Wshorten-64-to-32"); - config.define("ROCKSDB_BACKTRACE", None); - config.define("HAVE_UINT128_EXTENSION", None); - config.define("DHAVE_ALIGNED_NEW", None); - config.define("_REENTRANT", None); - //pkg_config::Config::new().probe("openssl").unwrap(); - config.include("rocksdb/plugin/openssl/include"); - lib_sources.push("plugin/openssl/openssl_provider.cc"); - - // let dir = env::var("CARGO_MANIFEST_DIR").unwrap(); - // println!("cargo:rustc-link-lib=static=crypto"); - } else if target.contains("windows") { - link("rpcrt4", false); - link("shlwapi", false); - config.define("DWIN32", None); - config.define("OS_WIN", None); - config.define("_MBCS", None); - config.define("WIN64", None); - config.define("NOMINMAX", None); - config.define("ROCKSDB_WINDOWS_UTF8_FILENAMES", None); - - // Got some errors while using IPPCP plugin on windows. - // switching to openssl - - // let dir = env::var("CARGO_MANIFEST_DIR").unwrap(); - // println!( - // "cargo:rustc-link-search=native={}", - // Path::new(&dir) - // .join("rocksdb/plugin/ippcp/library/win") - // .display() - // ); - // println!("cargo:rustc-link-lib=static=ippcpmt"); - - if &target == "x86_64-pc-windows-gnu" { - // Tell MinGW to create localtime_r wrapper of localtime_s function. - config.define("_POSIX_C_SOURCE", Some("1")); - // Tell MinGW to use at least Windows Vista headers instead of the ones of Windows XP. 
- // (This is minimum supported version of rocksdb) - config.define("_WIN32_WINNT", Some("_WIN32_WINNT_VISTA")); - } - - // Remove POSIX-specific sources - lib_sources = lib_sources - .iter() - .cloned() - .filter(|file| { - !matches!( - *file, - "port/port_posix.cc" - | "env/env_posix.cc" - | "env/fs_posix.cc" - | "env/io_posix.cc" - ) - }) - .collect::>(); - - // Add Windows-specific sources - lib_sources.extend([ - "port/win/env_default.cc", - "port/win/port_win.cc", - "port/win/xpress_win.cc", - "port/win/io_win.cc", - "port/win/win_thread.cc", - "port/win/env_win.cc", - "port/win/win_logger.cc", - ]); - - if cfg!(feature = "jemalloc") { - lib_sources.push("port/win/win_jemalloc.cc"); - } - } - - config.define("ROCKSDB_SUPPORT_THREAD_LOCAL", None); - - if cfg!(feature = "jemalloc") { - config.define("WITH_JEMALLOC", "ON"); - } - - #[cfg(feature = "io-uring")] - if target.contains("linux") { - pkg_config::probe_library("liburing") - .expect("The io-uring feature was requested but the library is not available"); - config.define("ROCKSDB_IOURING_PRESENT", Some("1")); - } - - if env::var("CARGO_CFG_TARGET_POINTER_WIDTH").unwrap() != "64" { - config.define("_FILE_OFFSET_BITS", Some("64")); - config.define("_LARGEFILE64_SOURCE", Some("1")); - } - - if target.contains("msvc") { - config.flag("-EHsc"); - config.flag("-std:c++17"); - // set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zi /nologo /EHsc /GS /Gd /GR /GF /fp:precise /Zc:wchar_t /Zc:forScope /errorReport:queue") - // set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W4 /wd4127 /wd4800 /wd4996 /wd4351 /wd4100 /wd4204 /wd4324") - } else { - if target.contains("x86_64") { - config.flag_if_supported("-Wstrict-prototypes"); - } - //-W -Wextra -Wall -pthread - //-fno-omit-frame-pointer - //-momit-leaf-frame-pointer - config.flag(&cxx_standard()); - // matches the flags in CMakeLists.txt from rocksdb - config.flag("-Wsign-compare"); - config.flag("-Wshadow"); - config.flag("-Wno-unused-parameter"); - config.flag("-Wno-unused-variable"); - config.flag("-Woverloaded-virtual"); - config.flag("-Wnon-virtual-dtor"); - config.flag("-Wno-missing-field-initializers"); - config.flag("-Wno-strict-aliasing"); - config.flag("-Wno-invalid-offsetof"); - } - - for file in lib_sources { - config.file(format!("rocksdb/{file}")); - } - - config.file("build_version.cc"); - config.file("api/c.cc"); - - config.cpp(true); - config.flag_if_supported("-std=c++17"); - - config.compile("librocksdb.a"); -} - -fn build_snappy() { - let target = env::var("TARGET").unwrap(); - let endianness = env::var("CARGO_CFG_TARGET_ENDIAN").unwrap(); - let mut config = cc::Build::new(); - - config.include("snappy/"); - config.include("."); - config.define("NDEBUG", Some("1")); - config.extra_warnings(false); - - if target.contains("msvc") { - config.flag("-EHsc"); - } else { - // Snappy requires C++11. 
- // See: https://github.com/google/snappy/blob/master/CMakeLists.txt#L32-L38 - config.flag("-std=c++11"); - } - - if endianness == "big" { - config.define("SNAPPY_IS_BIG_ENDIAN", Some("1")); - } - - config.file("snappy/snappy.cc"); - config.file("snappy/snappy-sinksource.cc"); - config.file("snappy/snappy-c.cc"); - config.cpp(true); - config.compile("libsnappy.a"); -} - -fn try_to_find_and_link_lib(lib_name: &str) -> bool { - println!("cargo:rerun-if-env-changed={lib_name}_COMPILE"); - if let Ok(v) = env::var(format!("{lib_name}_COMPILE")) { - if v.to_lowercase() == "true" || v == "1" { - return false; - } - } - - println!("cargo:rerun-if-env-changed={lib_name}_LIB_DIR"); - println!("cargo:rerun-if-env-changed={lib_name}_STATIC"); - - if let Ok(lib_dir) = env::var(format!("{lib_name}_LIB_DIR")) { - println!("cargo:rustc-link-search=native={lib_dir}"); - let mode = match env::var_os(format!("{lib_name}_STATIC")) { - Some(_) => "static", - None => "dylib", - }; - println!("cargo:rustc-link-lib={}={}", mode, lib_name.to_lowercase()); - return true; - } - false -} - -fn cxx_standard() -> String { - env::var("ROCKSDB_CXX_STD").map_or("-std=c++17".to_owned(), |cxx_std| { - if !cxx_std.starts_with("-std=") { - format!("-std={cxx_std}") - } else { - cxx_std - } - }) -} - -fn update_submodules() { - let program = "git"; - let dir = "../"; - let args = ["submodule", "update", "--init"]; - println!( - "Running command: \"{} {}\" in dir: {}", - program, - args.join(" "), - dir - ); - let ret = Command::new(program).current_dir(dir).args(args).status(); - - match ret.map(|status| (status.success(), status.code())) { - Ok((true, _)) => (), - Ok((false, Some(c))) => panic!("Command failed with error code {}", c), - Ok((false, None)) => panic!("Command got killed"), - Err(e) => panic!("Command failed with error: {}", e), - } -} - -fn main() { - if !Path::new("rocksdb/AUTHORS").exists() { - update_submodules(); - } - let target = env::var("TARGET").unwrap(); - if target.contains("openbsd") { - env::set_var("LIBCLANG_PATH", "/usr/local/lib"); - } else if target.contains("windows") { - env::set_var("LIBCLANG_PATH", "C:\\Program Files\\LLVM\\bin"); - } - - bindgen_rocksdb(); - - if !try_to_find_and_link_lib("ROCKSDB") { - println!("cargo:rerun-if-changed=rocksdb/"); - fail_on_empty_directory("rocksdb"); - build_rocksdb(); - } else { - let target = env::var("TARGET").unwrap(); - // according to https://github.com/alexcrichton/cc-rs/blob/master/src/lib.rs#L2189 - if target.contains("apple") || target.contains("freebsd") || target.contains("openbsd") { - println!("cargo:rustc-link-lib=dylib=c++"); - } else if target.contains("linux") { - println!("cargo:rustc-link-lib=dylib=stdc++"); - } - } - if cfg!(feature = "snappy") && !try_to_find_and_link_lib("SNAPPY") { - println!("cargo:rerun-if-changed=snappy/"); - fail_on_empty_directory("snappy"); - build_snappy(); - } - - // Allow dependent crates to locate the sources and output directory of - // this crate. Notably, this allows a dependent crate to locate the RocksDB - // sources and built archive artifacts provided by this crate. - println!( - "cargo:cargo_manifest_dir={}", - env::var("CARGO_MANIFEST_DIR").unwrap() - ); - println!("cargo:out_dir={}", env::var("OUT_DIR").unwrap()); -} diff --git a/librocksdb-sys/build_version.cc b/librocksdb-sys/build_version.cc deleted file mode 100644 index 9277cf4..0000000 --- a/librocksdb-sys/build_version.cc +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
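For context on the system-library override handled by try_to_find_and_link_lib in the deleted build.rs above: `ROCKSDB_COMPILE` set to `true` or `1` forces a from-source build, `ROCKSDB_LIB_DIR` points cargo at a prebuilt library, and `ROCKSDB_STATIC` switches from dynamic to static linking (the same `*_COMPILE`/`*_LIB_DIR`/`*_STATIC` triple is honoured for `SNAPPY`). A standalone sketch of that decision, kept outside any build script and using illustrative function names, might look like this:

use std::env;

// Returns the cargo link directives for a prebuilt library, or None when the
// caller should fall back to compiling the bundled sources.
fn system_link_directives(lib_name: &str) -> Option<String> {
    // An explicit <LIB>_COMPILE=true/1 always wins and forces a source build.
    if let Ok(v) = env::var(format!("{lib_name}_COMPILE")) {
        if v.eq_ignore_ascii_case("true") || v == "1" {
            return None;
        }
    }
    // Otherwise a <LIB>_LIB_DIR is required in order to use a system copy.
    let lib_dir = env::var(format!("{lib_name}_LIB_DIR")).ok()?;
    // <LIB>_STATIC (any value) switches from dynamic to static linking.
    let mode = if env::var_os(format!("{lib_name}_STATIC")).is_some() {
        "static"
    } else {
        "dylib"
    };
    Some(format!(
        "cargo:rustc-link-search=native={lib_dir}\ncargo:rustc-link-lib={mode}={}",
        lib_name.to_lowercase()
    ))
}

fn main() {
    // e.g. ROCKSDB_LIB_DIR=/usr/local/lib ROCKSDB_STATIC=1 cargo build
    match system_link_directives("ROCKSDB") {
        Some(directives) => println!("{directives}"),
        None => println!("building bundled rocksdb from source"),
    }
}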
- -#include - -#include "rocksdb/version.h" -#include "rocksdb/utilities/object_registry.h" -#include "util/string_util.h" - -// The build script may replace these values with real values based -// on whether or not GIT is available and the platform settings -static const std::string rocksdb_build_git_sha = "3f7c92b9753b697ce6a5ea737086d2751f17956c"; -static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:v8.3.2"; -#define HAS_GIT_CHANGES 0 -#if HAS_GIT_CHANGES == 0 -// If HAS_GIT_CHANGES is 0, the GIT date is used. -// Use the time the branch/tag was last modified -static const std::string rocksdb_build_date = "rocksdb_build_date:2023-06-15 05:32:14"; -#else -// If HAS_GIT_CHANGES is > 0, the branch/tag has modifications. -// Use the time the build was created. -static const std::string rocksdb_build_date = "rocksdb_build_date:2023-06-15 05:32:14"; -#endif - -std::unordered_map ROCKSDB_NAMESPACE::ObjectRegistry::builtins_ = {}; - -extern "C" bool RocksDbIOUringEnable() { - return true; -} - -namespace ROCKSDB_NAMESPACE { -static void AddProperty(std::unordered_map *props, const std::string& name) { - size_t colon = name.find(":"); - if (colon != std::string::npos && colon > 0 && colon < name.length() - 1) { - // If we found a "@:", then this property was a build-time substitution that failed. Skip it - size_t at = name.find("@", colon); - if (at != colon + 1) { - // Everything before the colon is the name, after is the value - (*props)[name.substr(0, colon)] = name.substr(colon + 1); - } - } -} - -static std::unordered_map* LoadPropertiesSet() { - auto * properties = new std::unordered_map(); - AddProperty(properties, rocksdb_build_git_sha); - AddProperty(properties, rocksdb_build_git_tag); - AddProperty(properties, rocksdb_build_date); - return properties; -} - -const std::unordered_map& GetRocksBuildProperties() { - static std::unique_ptr> props(LoadPropertiesSet()); - return *props; -} - -std::string GetRocksVersionAsString(bool with_patch) { - std::string version = std::to_string(ROCKSDB_MAJOR) + "." + std::to_string(ROCKSDB_MINOR); - if (with_patch) { - return version + "." + std::to_string(ROCKSDB_PATCH); - } else { - return version; - } -} - -std::string GetRocksBuildInfoAsString(const std::string& program, bool verbose) { - std::string info = program + " (RocksDB) " + GetRocksVersionAsString(true); - if (verbose) { - for (const auto& it : GetRocksBuildProperties()) { - info.append("\n "); - info.append(it.first); - info.append(": "); - info.append(it.second); - } - } - return info; -} -} // namespace ROCKSDB_NAMESPACE diff --git a/librocksdb-sys/lz4/.circleci/config.yml b/librocksdb-sys/lz4/.circleci/config.yml deleted file mode 100644 index ae5aa39..0000000 --- a/librocksdb-sys/lz4/.circleci/config.yml +++ /dev/null @@ -1,75 +0,0 @@ -# This configuration was automatically generated from a CircleCI 1.0 config. -# It should include any build commands you had along with commands that CircleCI -# inferred from your project structure. We strongly recommend you read all the -# comments in this file to understand the structure of CircleCI 2.0, as the idiom -# for configuration has changed substantially in 2.0 to allow arbitrary jobs rather -# than the prescribed lifecycle of 1.0. In general, we recommend using this generated -# configuration as a reference rather than using it in production, though in most -# cases it should duplicate the execution of your original 1.0 config. 
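The deleted librocksdb-sys/build_version.cc above exposes build metadata (git SHA, tag, build date) through GetRocksBuildProperties(); each property is baked in as a single "name:value" string, and an entry whose value still begins with "@" is treated as a failed build-time substitution and skipped. A small Rust rendering of that parsing, offered as an illustrative sketch rather than code from the crate (the "@...@" placeholder in the example is hypothetical), looks like this:

use std::collections::HashMap;

// Parse one "name:value" build property, skipping values that still start
// with '@' (i.e. placeholders the build never substituted).
fn add_property(props: &mut HashMap<String, String>, entry: &str) {
    if let Some(colon) = entry.find(':') {
        if colon > 0 && colon < entry.len() - 1 && entry.as_bytes()[colon + 1] != b'@' {
            // Everything before the colon is the name, everything after is the value.
            props.insert(entry[..colon].to_string(), entry[colon + 1..].to_string());
        }
    }
}

fn main() {
    let mut props = HashMap::new();
    add_property(&mut props, "rocksdb_build_git_tag:v8.3.2");
    add_property(&mut props, "rocksdb_build_date:@build_date@"); // hypothetical unsubstituted placeholder: skipped
    assert_eq!(props.get("rocksdb_build_git_tag").map(String::as_str), Some("v8.3.2"));
    assert!(!props.contains_key("rocksdb_build_date"));
}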
-version: 2 -jobs: - build: - working_directory: ~/lz4/lz4 - # Parallelism is broken in this file : it just plays the same tests twice. - # The script will have to be modified to support parallelism properly - # In the meantime, set it to 1. - parallelism: 1 - shell: /bin/bash --login - # CircleCI 2.0 does not support environment variables that refer to each other the same way as 1.0 did. - # If any of these refer to each other, rewrite them so that they don't or see https://circleci.com/docs/2.0/env-vars/#interpolating-environment-variables-to-set-other-environment-variables . - environment: - CIRCLE_ARTIFACTS: /tmp/circleci-artifacts - CIRCLE_TEST_REPORTS: /tmp/circleci-test-results - # In CircleCI 1.0 we used a pre-configured image with a large number of languages and other packages. - # In CircleCI 2.0 you can now specify your own image, or use one of our pre-configured images. - # The following configuration line tells CircleCI to use the specified docker image as the runtime environment for you job. - # We have selected a pre-built image that mirrors the build environment we use on - # the 1.0 platform, but we recommend you choose an image more tailored to the needs - # of each job. For more information on choosing an image (or alternatively using a - # VM instead of a container) see https://circleci.com/docs/2.0/executor-types/ - # To see the list of pre-built images that CircleCI provides for most common languages see - # https://circleci.com/docs/2.0/circleci-images/ - docker: - - image: fbopensource/lz4-circleci-primary:0.0.4 - steps: - # Machine Setup - # If you break your build into multiple jobs with workflows, you will probably want to do the parts of this that are relevant in each - # The following `checkout` command checks out your code to your working directory. In 1.0 we did this implicitly. In 2.0 you can choose where in the course of a job your code should be checked out. - - checkout - # Prepare for artifact and test results collection equivalent to how it was done on 1.0. - # In many cases you can simplify this from what is generated here. 
- # 'See docs on artifact collection here https://circleci.com/docs/2.0/artifacts/' - - run: mkdir -p $CIRCLE_ARTIFACTS $CIRCLE_TEST_REPORTS - # Test - # This would typically be a build job when using workflows, possibly combined with build - # This is based on your 1.0 configuration file or project settings - - run: CFLAGS= make clangtest && make clean - - run: g++ -v; make cxxtest && make clean - - run: gcc -v; g++ -v; make ctocpptest && make clean - - run: gcc-5 -v; CC=gcc-5 CFLAGS="-O2 -Werror" make check && make clean - - run: gcc-5 -v; CC=gcc-5 CFLAGS="-O2 -m32 -Werror" CPPFLAGS=-I/usr/include/x86_64-linux-gnu make check && make clean - - run: gcc-6 -v; CC=gcc-6 MOREFLAGS="-O2 -Werror" make check && make clean - - run: make cmake && make clean - - run: make -C tests test-lz4 - - run: make -C tests test-lz4c - - run: make -C tests test-frametest - - run: make -C tests test-fuzzer && make clean - - run: make -C lib all && make clean - - run: pyenv global 3.4.4; make versionsTest MOREFLAGS=-I/usr/include/x86_64-linux-gnu && make clean - - run: make travis-install && make clean - - run: gcc -v; CFLAGS="-O2 -m32 -Werror" CPPFLAGS=-I/usr/include/x86_64-linux-gnu make check && make clean - - run: clang -v; make staticAnalyze && make clean - - run: make platformTest CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static && make clean - - run: make platformTest CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static MOREFLAGS=-m64 && make clean - - run: make platformTest CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static && make clean - - run: make platformTest CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static && make clean - # Teardown - # If you break your build into multiple jobs with workflows, you will probably want to do the parts of this that are relevant in each - # Save test results - - store_test_results: - path: /tmp/circleci-test-results - # Save artifacts - - store_artifacts: - path: /tmp/circleci-artifacts - - store_artifacts: - path: /tmp/circleci-test-results diff --git a/librocksdb-sys/lz4/.circleci/images/primary/Dockerfile b/librocksdb-sys/lz4/.circleci/images/primary/Dockerfile deleted file mode 100644 index 7767014..0000000 --- a/librocksdb-sys/lz4/.circleci/images/primary/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM circleci/buildpack-deps:bionic - -RUN sudo apt-get -y -qq update -RUN sudo apt-get -y install software-properties-common -RUN sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test -RUN sudo apt-get -y install cmake -RUN sudo apt-get -y install qemu-system-ppc qemu-user-static qemu-system-arm -RUN sudo apt-get -y install libc6-dev-armel-cross libc6-dev-arm64-cross libc6-dev-i386 -RUN sudo apt-get -y install clang clang-tools -RUN sudo apt-get -y install gcc-5 gcc-5-multilib gcc-6 -RUN sudo apt-get -y install valgrind -RUN sudo apt-get -y install gcc-multilib-powerpc-linux-gnu gcc-powerpc-linux-gnu gcc-arm-linux-gnueabi gcc-aarch64-linux-gnu diff --git a/librocksdb-sys/lz4/.cirrus.yml b/librocksdb-sys/lz4/.cirrus.yml deleted file mode 100644 index e423538..0000000 --- a/librocksdb-sys/lz4/.cirrus.yml +++ /dev/null @@ -1,5 +0,0 @@ -freebsd_instance: - image_family: freebsd-12-2 - -task: - script: pkg install -y gmake && gmake test diff --git a/librocksdb-sys/lz4/.gitattributes b/librocksdb-sys/lz4/.gitattributes deleted file mode 100644 index 6212bd4..0000000 --- a/librocksdb-sys/lz4/.gitattributes +++ /dev/null @@ -1,21 +0,0 @@ -# Set the default behavior -* text eol=lf - -# Explicitly declare source files -*.c text eol=lf -*.h text eol=lf - -# Denote files that 
should not be modified. -*.odt binary -*.png binary - -# Visual Studio -*.sln text eol=crlf -*.vcxproj* text eol=crlf -*.vcproj* text eol=crlf -*.suo binary -*.rc text eol=crlf - -# Windows -*.bat text eol=crlf -*.cmd text eol=crlf diff --git a/librocksdb-sys/lz4/.github/ISSUE_TEMPLATE/bug_report.md b/librocksdb-sys/lz4/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index e47afe7..0000000 --- a/librocksdb-sys/lz4/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: '' -labels: '' -assignees: '' - ---- - -**Describe the bug** -A clear and concise description of what the bug is. - -**Expected behavior** -Please describe what you expected to happen. - -**To Reproduce** -Steps to reproduce the behavior: -1. Go to '...' -2. Click on '....' -3. Scroll down to '....' -4. See error '...' -If applicable, add screenshots to help explain your problem. - -**System (please complete the following information):** - - OS: [e.g. Mac] - - Version [e.g. 22] - - Compiler [e.g. gcc] - - Build System [e.g. Makefile] - - Other hardware specs [e.g. Core 2 duo...] - -**Additional context** -Add any other context about the problem here. diff --git a/librocksdb-sys/lz4/.github/ISSUE_TEMPLATE/feature_request.md b/librocksdb-sys/lz4/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index bbcbbe7..0000000 --- a/librocksdb-sys/lz4/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project -title: '' -labels: '' -assignees: '' - ---- - -**Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] - -**Describe the solution you'd like** -A clear and concise description of what you want to happen. - -**Describe alternatives you've considered** -A clear and concise description of any alternative solutions or features you've considered. - -**Additional context** -Add any other context or screenshots about the feature request here. diff --git a/librocksdb-sys/lz4/.github/workflows/README.md b/librocksdb-sys/lz4/.github/workflows/README.md deleted file mode 100644 index 306d875..0000000 --- a/librocksdb-sys/lz4/.github/workflows/README.md +++ /dev/null @@ -1,61 +0,0 @@ -This directory contains [GitHub Actions](https://github.com/features/actions) workflow files. - -# Known issues - -## USAN, ASAN (`lz4-ubsan-x64`, `lz4-ubsan-x86`, `lz4-asan-x64`) - -For now, `lz4-ubsan-*` ignores the exit code of `make usan` and `make usan32`. -Because there are several issues which may take relatively long time to resolve. - -We'll fully enable it when we ensure `make usan` is ready for all commits and PRs. - -See [#983](https://github.com/lz4/lz4/pull/983) for details. - - -## C Compilers (`lz4-c-compilers`) - -- Our test doesn't use `gcc-4.5` due to installation issue of its package. (`apt-get install gcc-4.5` fails on GH-Actions VM) - -- Currently, the following 32bit executable tests fail with all versions of `clang`. - - `CC=clang-X CFLAGS='-O3' make V=1 -C tests clean test-lz4c32` - - `CC=clang-X CFLAGS='-O3 -mx32' make V=1 -C tests clean test-lz4c32` - - See [#991](https://github.com/lz4/lz4/issues/991) for details. 
- -- Currently, the following 32bit executable tests fail with `gcc-11` - - `CC=gcc-11 CFLAGS='-O3' make V=1 -C tests clean test-lz4c32` - - `CC=gcc-11 CFLAGS='-O3 -mx32' make V=1 -C tests clean test-lz4c32` - - See [#991](https://github.com/lz4/lz4/issues/991) for details. - - -## cppcheck (`lz4-cppcheck`) - -This test script ignores the exit code of `make cppcheck`. -Because this project doesn't 100% follow their recommendation. -Also sometimes it reports false positives. - - - -# Notes - -- You can investigate various information at the right pane of GitHub - Actions report page. - -| Item | Section in the right pane | -| ------------------------- | ------------------------------------- | -| OS, VM | Set up job | -| git repo, commit hash | Run actions/checkout@v2 | -| Version of tools | Environment info | - - - -# Difference with `.travis.yml` - -The following tests are not included yet. - -- name: Compile OSS-Fuzz targets - -The following tests will not be included due to limitation of GH-Actions. - -- name: aarch64 real-hw tests -- name: PPC64LE real-hw tests -- name: IBM s390x real-hw tests diff --git a/librocksdb-sys/lz4/.github/workflows/ci.yml b/librocksdb-sys/lz4/.github/workflows/ci.yml deleted file mode 100644 index c490bc7..0000000 --- a/librocksdb-sys/lz4/.github/workflows/ci.yml +++ /dev/null @@ -1,800 +0,0 @@ -# For details, see README.md in this directory. - -############################################################### -# C compilers -# -# - gcc -# - clang -# -# Known Issue -# - All test cases which described as 'fail' must be fixed and replaced with 'true'. -# - gcc-11 (x32, x86) : "../lib/lz4hc.c:148: LZ4HC_countBack: Assertion `(size_t)(match - mMin) < (1U<<31)' failed." -# - all clangs (x32, x86) : "../lib/lz4hc.c:282: int LZ4HC_InsertAndGetWiderMatch(...): Assertion `matchPtr >= lowPrefixPtr' failed." -# -name: lz4 CI -on: [push, pull_request] -permissions: - contents: read - -jobs: - lz4-c-compilers: - name: CC=${{ matrix.cc }}, ${{ matrix.os }} - strategy: - fail-fast: false # 'false' means Don't stop matrix workflows even if some matrix failed. - matrix: - include: [ - # You can access the following values via ${{ matrix.??? }} - # - # pkgs : apt-get package names. It can include multiple package names which are delimited by space. - # cc : C compiler executable. - # cxx : C++ compiler executable for `make ctocpptest`. - # x32 : Set 'true' if compiler supports x32. Otherwise, set 'false'. - # Set 'fail' if it supports x32 but fails for now. 'fail' cases must be removed. - # x86 : Set 'true' if compiler supports x86 (-m32). Otherwise, set 'false'. - # Set 'fail' if it supports x86 but fails for now. 'fail' cases must be removed. - # cxxtest : Set 'true' if it can be compiled as C++ code. Otherwise, set 'false'. - # freestanding : Set 'true' if it can be compiled and execute freestanding code. Otherwise, set 'false'. - # Usually, it requires Linux, x86_64 and gcc/g++. - # os : GitHub Actions YAML workflow label. 
See https://github.com/actions/virtual-environments#available-environments - - # cc - { pkgs: '', cc: cc, cxx: c++, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'true', os: ubuntu-latest, }, - - # gcc - { pkgs: '', cc: gcc, cxx: g++, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'true', os: ubuntu-latest, }, - { pkgs: 'gcc-12 g++-12 lib32gcc-12-dev libx32gcc-12-dev', cc: gcc-12, cxx: g++-12, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'true', os: ubuntu-22.04, }, - { pkgs: 'gcc-11 g++-11 lib32gcc-11-dev libx32gcc-11-dev', cc: gcc-11, cxx: g++-11, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'true', os: ubuntu-22.04, }, - { pkgs: 'gcc-10 g++-10 lib32gcc-10-dev libx32gcc-10-dev', cc: gcc-10, cxx: g++-10, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'true', os: ubuntu-22.04, }, - { pkgs: 'gcc-9 g++-9 lib32gcc-9-dev libx32gcc-9-dev', cc: gcc-9, cxx: g++-9, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'true', os: ubuntu-22.04, }, - { pkgs: 'gcc-8 g++-8 lib32gcc-8-dev libx32gcc-8-dev', cc: gcc-8, cxx: g++-8, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'true', os: ubuntu-20.04, }, - { pkgs: 'gcc-7 g++-7 lib32gcc-7-dev libx32gcc-7-dev', cc: gcc-7, cxx: g++-7, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'true', os: ubuntu-20.04, }, - { pkgs: 'gcc-6 g++-6 lib32gcc-6-dev libx32gcc-6-dev', cc: gcc-6, cxx: g++-6, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'true', os: ubuntu-18.04, }, - { pkgs: 'gcc-5 g++-5 lib32gcc-5-dev libx32gcc-5-dev', cc: gcc-5, cxx: g++-5, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'true', os: ubuntu-18.04, }, - { pkgs: 'gcc-4.8 g++-4.8 lib32gcc-4.8-dev libx32gcc-4.8-dev', cc: gcc-4.8, cxx: g++-4.8, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'true', os: ubuntu-18.04, }, - - # clang - { pkgs: 'lib32gcc-11-dev libx32gcc-11-dev', cc: clang, cxx: clang++, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'false', os: ubuntu-latest, }, - { pkgs: 'clang-14 lib32gcc-11-dev libx32gcc-11-dev', cc: clang-14, cxx: clang++-14, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'false', os: ubuntu-22.04, }, - { pkgs: 'clang-13 lib32gcc-11-dev libx32gcc-11-dev', cc: clang-13, cxx: clang++-13, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'false', os: ubuntu-22.04, }, - { pkgs: 'clang-12 lib32gcc-11-dev libx32gcc-11-dev', cc: clang-12, cxx: clang++-12, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'false', os: ubuntu-22.04, }, - { pkgs: 'clang-11 lib32gcc-11-dev libx32gcc-11-dev', cc: clang-11, cxx: clang++-11, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'false', os: ubuntu-22.04, }, - { pkgs: 'clang-10 lib32gcc-11-dev libx32gcc-11-dev', cc: clang-10, cxx: clang++-10, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'false', os: ubuntu-20.04, }, - { pkgs: 'clang-9 lib32gcc-11-dev libx32gcc-11-dev', cc: clang-9, cxx: clang++-9, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'false', os: ubuntu-20.04, }, - { pkgs: 'clang-8 lib32gcc-11-dev libx32gcc-11-dev', cc: clang-8, cxx: clang++-8, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'false', os: ubuntu-20.04, }, - { pkgs: 'clang-7 lib32gcc-7-dev libx32gcc-7-dev', cc: clang-7, cxx: clang++-7, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'false', os: ubuntu-20.04, }, - { pkgs: 'clang-6.0 lib32gcc-11-dev libx32gcc-11-dev', cc: clang-6.0, cxx: clang++-6.0, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'false', os: 
ubuntu-20.04, }, - { pkgs: 'clang-5.0 lib32gcc-11-dev libx32gcc-11-dev', cc: clang-5.0, cxx: clang++-5.0, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'false', os: ubuntu-18.04, }, - { pkgs: 'clang-4.0 lib32gcc-11-dev libx32gcc-11-dev', cc: clang-4.0, cxx: clang++-4.0, x32: 'true', x86: 'true', cxxtest: 'true', freestanding: 'false', os: ubuntu-18.04, }, - { pkgs: 'clang-3.9', cc: clang-3.9, cxx: clang++-3.9, x32: 'fail', x86: 'fail', cxxtest: 'false', freestanding: 'false', os: ubuntu-18.04, }, - ] - - runs-on: ${{ matrix.os }} - env: # Set environment variables - # We globally set CC and CXX to improve compatibility with .travis.yml - CC: ${{ matrix.cc }} - CXX: ${{ matrix.cxx }} - FIXME__LZ4_CI_IGNORE : ' echo Error. But we ignore it for now.' - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - - name: apt-get install - run: | - sudo apt-get update - sudo apt-get install gcc-multilib - sudo apt-get install ${{ matrix.pkgs }} - - - name: Environment info - run: | - echo && type $CC && which $CC && $CC --version - echo && type $CXX && which $CXX && $CXX --version - - - name: make - if: always() - run: make V=1 - - - name: install test - if: always() - run: make clean; make V=1 -C tests test-install - - - name: make all - if: always() - run: make V=1 clean all - - - name: make c_standards (C90) - if: always() - run: make V=1 clean c_standards_c90 - - - name: make c_standards (C11) - if: always() - run: make V=1 clean c_standards_c11 - - - name: make c-to-c++ - if: always() - run: make V=1 clean ctocpptest - - - name: make cxxtest - if: ${{ matrix.cxxtest == 'true' }} - run: make V=1 clean cxxtest - - - name: make test-freestanding - if: ${{ matrix.freestanding == 'true' }} - run: make V=1 clean test-freestanding - - - name: make -C programs default - if: always() - run: make V=1 -C programs clean default - - - name: make -C programs default -D_FORTIFY_SOURCE=2 - if: always() - run: CFLAGS='-fPIC' LDFLAGS='-pie -fPIE -D_FORTIFY_SOURCE=2' make V=1 -C programs clean default - - - name: make -C tests test-lz4 - if: always() - run: make clean; MOREFLAGS='-Werror' make -j V=1 -C tests test-lz4 - - - name: make clangtest (clang only) - if: ${{ startsWith( matrix.cc , 'clang' ) }} - run: make V=1 clean clangtest - - - name: make -C tests test MOREFLAGS='-mx32' - if: ${{ matrix.x32 == 'true' }} - run: make clean; LDFLAGS='-Wl,--verbose' MOREFLAGS='-mx32' make -j V=1 -C tests test - - - name: make -C tests test-lz4c32 - if: ${{ matrix.x86 == 'true' }} - run: LDFLAGS='-Wl,--verbose' MOREFLAGS='-Werror' make V=1 -C tests clean test-lz4c32 - - - ############################################################### - # # - # Remove this block when we stabilize the tests. # - # # - - - name: make -C tests test MOREFLAGS='-mx32' || echo Ignore failure for now. - if: ${{ matrix.x32 == 'fail' }} - run: make clean; LDFLAGS='-Wl,--verbose' MOREFLAGS='-mx32' make -j V=1 -C tests test || $FIXME__LZ4_CI_IGNORE - - - name: make -C tests test-lz4c32 || echo Ignore failure for now. 
- if: ${{ matrix.x86 == 'fail' }} - run: make clean; LDFLAGS='-Wl,--verbose' MOREFLAGS='-Werror' make V=1 -C tests test-lz4c32 || $FIXME__LZ4_CI_IGNORE - - # # - ############################################################### - - - -############################################################### -# LZ4 self tests -# -# - Benchmark -# - Fuzzer -# - LZ4 Frame -# - LZ4 versions -# - Custom LZ4_DISTANCE_MAX -# - lz4-benchmark: - name: Benchmark - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - - name: apt-get install - run: | - sudo apt-get update - sudo apt-get install gcc-multilib - - - name: benchmark (-C tests test-lz4) - run: make -j V=1 -C tests test-lz4 - - - name: benchmark (-C tests test-lz4c) - run: make -j V=1 -C tests test-lz4c - - - name: benchmark (-C tests test-lz4c32) - run: make V=1 -C tests test-lz4c32 - - - name: benchmark (-C tests test-fullbench) - run: make V=1 -C tests test-fullbench - - - name: benchmark (-C tests test-fullbench32) - run: make V=1 -C tests test-fullbench32 - - - lz4-fuzzer: - name: Fuzzer test - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - - name: apt-get install - run: | - sudo apt-get update - sudo apt-get install gcc-multilib - - - name: setup - run: sudo sysctl -w vm.mmap_min_addr=4096 - - - name: fuzzer - run: make V=1 -C tests test-fuzzer - - - name: fuzzer32 - run: make V=1 -C tests test-fuzzer32 - - - lz4-standard-makefile-variables: - name: LZ4 Makefile - support for standard variables - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - - name: make standard_variables - run: make V=1 standard_variables - - - lz4-versions: - name: LZ4 versions test - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - - name: apt-get install - run: | - sudo apt-get update - sudo apt-get install gcc-multilib - - - name: make -C tests versionsTest - run: make V=1 -C tests versionsTest - - - lz4-abi: - name: LZ4 inter-versions ABI test - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - - name: apt-get install - run: | - sudo apt-get update - sudo apt-get install gcc-multilib - - - name: make -C tests abiTests - run: make V=1 -C tests abiTests - - - lz4-frame: - name: LZ4 frame test - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - - name: apt-get install - run: | - sudo apt-get update - sudo apt-get install gcc-multilib - - - name: LZ4 frame test - run: make V=1 -C tests test-frametest - - - name: LZ4 frame test (32-bit) - run: make V=1 -C tests test-frametest32 - - lz4-memory-usage: - name: test different values of LZ4_MEMORY_USAGE - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - name: LZ4_MEMORY_USAGE - run: make V=1 -C tests test-compile-with-lz4-memory-usage - - # Custom LZ4_DISTANCE_MAX ; lz4-wlib (CLI linked to dynamic library); LZ4_USER_MEMORY_FUNCTIONS - lz4-custom-distance: - name: Custom LZ4_DISTANCE_MAX - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - name: custom LZ4_DISTANCE_MAX; test LZ4_USER_MEMORY_FUNCTIONS - run: | - MOREFLAGS='-DLZ4_DISTANCE_MAX=8000' make V=1 check - make V=1 clean - make V=1 -C programs lz4-wlib - make V=1 clean - make V=1 -C tests fullbench-wmalloc # test LZ4_USER_MEMORY_FUNCTIONS - make 
V=1 clean - CC="c++ -Wno-deprecated" make V=1 -C tests fullbench-wmalloc # stricter function signature check - - # test block device compression #1086 - lz4cli-block-device: - name: Test lz4 compression on a block device - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - name: create a block device, compress it with lz4 # alternative : blindly use /dev/loop0, seems to always exist - run: | - make lz4 - dd if=/dev/zero of=full0.img bs=2M count=1 - BLOCK_DEVICE=$(sudo losetup --show -fP full0.img) - sudo chmod 666 $BLOCK_DEVICE - ./lz4 -v $BLOCK_DEVICE -c > /dev/null - sudo losetup -d $BLOCK_DEVICE - rm full0.img - - -############################################################### -# Check tools -# -# - cppcheck -# - scan-build -# - valgrind -# - ubsan -# - asan -# - unicode-lint -# - build examples -# - lz4-cppcheck: - name: make cppcheck - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - name: apt-get install - run: | - sudo apt-get update - sudo apt-get install cppcheck - - - name: Environment info - run: echo && type cppcheck && which cppcheck && cppcheck --version - - - name: cppcheck - # This test script ignores the exit code of cppcheck. - # See known issues in README.md. - run: make V=1 clean cppcheck || echo There are some cppcheck reports but we ignore it. - - - lz4-scan-build: - name: make staticAnalyze - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - name: apt-get install - run: | - sudo apt-get update - sudo apt-get install clang-tools - - - name: Environment info - run: | - echo && type gcc && which gcc && gcc --version - echo && type clang && which clang && clang --version - echo && type scan-build && which scan-build # scan-build doesn't have any --version equivalent option - echo && type make && which make && make -v - echo && cat /proc/cpuinfo || echo /proc/cpuinfo is not present - - - name: make staticAnalyze - run: make V=1 clean staticAnalyze - - - lz4-valgrind: - name: valgrind - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - name: apt-get install - run: | - sudo apt-get update - sudo apt-get install valgrind - - - name: Environment info - run: | - echo && type cc && which cc && cc --version - echo && type valgrind && which valgrind && valgrind --version - - - name: valgrind - run: make V=1 -C tests test-mem - - - lz4-ubsan-x64: - name: Linux x64 ubsan - runs-on: ubuntu-latest - env: # Set environment variables - FIXME__LZ4_CI_IGNORE : ' echo Error. But we ignore it for now.' - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - - name: ubsan - ######################################################### - # For now, we ignore the exit code of `make usan`. - # See "Known issues / lz4-ubsan-x64" in README.md - # When we'll resolve this issue, remove "|| $FIXME__LZ4_CI_IGNORE" - ######################################################### - run: make V=1 clean usan MOREFLAGS='-Wcomma -Werror' || $FIXME__LZ4_CI_IGNORE - - - lz4-ubsan-x86: - name: Linux x86 ubsan - runs-on: ubuntu-latest - env: # Set environment variables - FIXME__LZ4_CI_IGNORE : ' echo Error. But we ignore it for now.' 
- steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - - name: apt-get install - run: | - sudo apt-get update - sudo apt-get install gcc-multilib - sudo apt-get install lib32gcc-11-dev - - - name: ubsan32 - ######################################################### - # For now, we ignore the exit code of `make usan32`. - # See "Known issues / lz4-ubsaan-x86" in README.md. - # When we'll resolve this issue, remove "|| $FIXME__LZ4_CI_IGNORE" - ######################################################### - run: CC=clang make V=1 clean usan32 MOREFLAGS='-Wcomma -Werror' || $FIXME__LZ4_CI_IGNORE - - - lz4-asan-x64: - name: Linux x64 ASAN - runs-on: ubuntu-latest - env: # Set environment variables - FIXME__LZ4_CI_IGNORE : ' echo Error. But we ignore it for now.' - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - - name: setup - run: sudo sysctl -w vm.mmap_min_addr=4096 - - - name: frametest - run: CC=clang MOREFLAGS=-fsanitize=address make V=1 -C tests clean test-frametest - - - name: fuzzer - run: CC=clang MOREFLAGS=-fsanitize=address make V=1 -C tests clean test-fuzzer - - unicode-lint: - name: lint unicode in ./lib/, ./tests/ and ./programs/ - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - name: unicode lint - run: bash ./tests/unicode_lint.sh - - lz4-examples: - name: make examples - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - name: apt-get install - run: | - sudo apt-get update - - - name: Environment info - run: | - echo && type cc && which cc && cc --version - echo && type c++ && which c++ && c++ --version - - - name: examples - run: make V=1 clean examples - - - name: examples (compile as C++ code) - run: make V=1 -C examples clean cxxtest - - # lasts ~20mn - oss-fuzz: - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - sanitizer: [address, undefined, memory] - steps: - - name: Build Fuzzers (${{ matrix.sanitizer }}) - id: build - uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master - with: - oss-fuzz-project-name: 'lz4' - dry-run: false - sanitizer: ${{ matrix.sanitizer }} - - name: Run Fuzzers (${{ matrix.sanitizer }}) - uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master - with: - oss-fuzz-project-name: 'lz4' - fuzz-seconds: 600 - dry-run: false - sanitizer: ${{ matrix.sanitizer }} - - name: Upload Crash - uses: actions/upload-artifact@v1 - if: failure() && steps.build.outcome == 'success' - with: - name: ${{ matrix.sanitizer }}-artifacts - path: ./out/artifacts - - - -############################################################### -# Platforms -# -# - QEMU (ARM, ARM64, PPC, PPC64LE, S390X) -# - macOS -# - - # QEMU - # All tests use QEMU (static) and gcc cross compiler. - # - # note: - # We don't employ completely matrix method which provides `MOREFLAGS` - # etc in the matrix. Because some platform may need its special - # compiler options and test. - # For example, xxHash already has tests for scalar and SIMD version of - # it. But compiler options are quite different between platforms. - # - # So, please keep them simple and independent. - # - lz4-qemu-platforms: - name: QEMU ${{ matrix.type }} - strategy: - fail-fast: false # 'false' means Don't stop matrix workflows even if some matrix instance failed. - matrix: - include: [ - # You can access the following values via ${{ matrix.??? }} - # type : Architecture type for `if:` statement. - # pkgs : apt-get package names. 
You can include multiple packages which are delimited by space. - # xcc : gcc cross C compiler executable. - # xemu : QEMU static emulator executable. - # os : GitHub Actions YAML workflow label. See https://github.com/actions/virtual-environments#available-environments - - { type: ARM, pkgs: 'qemu-system-arm gcc-arm-linux-gnueabi', xcc: arm-linux-gnueabi-gcc, xemu: qemu-arm-static, os: ubuntu-latest, }, - { type: ARM64, pkgs: 'qemu-system-arm gcc-aarch64-linux-gnu', xcc: aarch64-linux-gnu-gcc, xemu: qemu-aarch64-static, os: ubuntu-latest, }, - { type: PPC, pkgs: 'qemu-system-ppc gcc-powerpc-linux-gnu', xcc: powerpc-linux-gnu-gcc, xemu: qemu-ppc-static, os: ubuntu-latest, }, - { type: PPC64LE, pkgs: 'qemu-system-ppc gcc-powerpc64le-linux-gnu', xcc: powerpc64le-linux-gnu-gcc, xemu: qemu-ppc64le-static, os: ubuntu-latest, }, - { type: S390X, pkgs: 'qemu-system-s390x gcc-s390x-linux-gnu', xcc: s390x-linux-gnu-gcc, xemu: qemu-s390x-static, os: ubuntu-latest, }, - ] - - runs-on: ${{ matrix.os }} - env: # Set environment variables - XCC: ${{ matrix.xcc }} - XEMU: ${{ matrix.xemu }} - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - - name: apt-get install - run: | - sudo apt-get update - sudo apt-get install gcc-multilib - sudo apt-get install qemu-utils qemu-user-static - sudo apt-get install ${{ matrix.pkgs }} - - - name: Environment info - run: | - echo && type $XCC && which $XCC && $XCC --version - echo && $XCC -v # Show built-in specs - echo && type $XEMU && which $XEMU && $XEMU --version - - - name: ARM64 - if: ${{ matrix.type == 'ARM64' }} - run: make V=1 platformTest CC=$XCC QEMU_SYS=$XEMU - - - name: ARM - if: ${{ matrix.type == 'ARM' }} - run: make V=1 platformTest CC=$XCC QEMU_SYS=$XEMU - - - name: PPC - if: ${{ matrix.type == 'PPC' }} - run: make V=1 platformTest CC=$XCC QEMU_SYS=$XEMU - - - name: PPC64LE - if: ${{ matrix.type == 'PPC64LE' }} - run: make V=1 platformTest CC=$XCC QEMU_SYS=$XEMU MOREFLAGS=-m64 - - - name: S390X - if: ${{ matrix.type == 'S390X' }} - run: make V=1 platformTest CC=$XCC QEMU_SYS=$XEMU - - - # macOS - lz4-platform-macos-latest: - name: macOS - runs-on: macos-latest - steps: - - uses: actions/checkout@v2 - - - name: Environment info - run: | - echo && type cc && which cc && cc --version - echo && type make && which make && make -v - echo && sysctl -a | grep machdep.cpu # cpuinfo - - - name: make default - run: CFLAGS="-Werror" make V=1 clean default - - - name: make test - run: make clean; make -j V=1 test MOREFLAGS='-Werror -Wconversion -Wno-sign-conversion' - - - name: Ensure `make test` doesn't depend on the status of the console - # see issue #990 for detailed explanations - run: make -j test > /dev/null - - -############################################################### -# Build systems -# -# - make -# - cmake -# - meson -# - - # make - lz4-build-make: - name: make - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - - name: Environment info - run: | - echo && type cc && which cc && cc --version - echo && type make && which make && make -v - - - name: make - run: make V=1 - - - lz4-build-make-travis-install: - name: make travis-install - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - - name: travis-install - run: make V=1 clean travis-install - - - name: travis-install result - run: | - echo && echo Installed files - ( cd ~/install_test_dir; find .; ) - - - # cmake - lz4-build-cmake: - name: cmake - runs-on: 
ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - - name: Environment info - run: | - echo && type cmake && which cmake && cmake --version - echo && type make && which make && make -v - - - name: cmake - run: | - cd build/cmake - mkdir build - cd build - cmake .. - CFLAGS=-Werror make VERBOSE=1 - - - # Invoke cmake via Makefile - lz4-build-make-cmake: - name: make cmake - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - name: make cmake - # V=1 for lz4 Makefile, VERBOSE=1 for cmake Makefile. - run: make V=1 VERBOSE=1 clean cmake - - - # Meson - lz4-build-meson: - name: Meson + Ninja - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 # https://github.com/actions/checkout - - uses: actions/setup-python@v2 # https://github.com/actions/setup-python - with: - python-version: '3.x' - - - name: Install - run: | - sudo apt-get update - sudo apt-get install tree ninja-build - python -m pip install --upgrade pip - pip3 install --user meson - - - name: Environment info - run: | - echo && type clang && which clang && clang --version - echo && type python && which python && python --version - echo && type meson && which meson && meson --version - - - name: meson - # 'run: >' replaces all newlines in the following block with spaces - run: > - meson setup - --buildtype=debug - -Db_lundef=false - -Dauto_features=enabled - -Dprograms=true - -Dcontrib=true - -Dtests=true - -Dexamples=true - contrib/meson build - - - name: staging - run: | - cd build - DESTDIR=./staging ninja install - tree ./staging - - - -############################################################ -# Check git tag for LZ4 releases -# - lz4-check-tag: - name: git version tag checking for release - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: make -C tests checkTag - if: startsWith(github.ref, 'refs/tags/v') # If git tag name starts with 'v' - run: | - echo "tag=${GITHUB_REF#refs/*/}" - make -C tests checkTag - tests/checkTag ${GITHUB_REF#refs/*/} - - - -############################################################ -# Gather CI environment information. 
-# - lz4-env-info: - name: GH-Actions Virtual Env Info (${{ matrix.os }}) - strategy: - matrix: - include: [ - { os: ubuntu-latest, }, # https://github.com/actions/virtual-environments/ - { os: ubuntu-22.04, }, # https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2204-Readme.md - { os: ubuntu-20.04, }, # https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md - { os: ubuntu-18.04, }, # https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu1804-Readme.md - ] - - runs-on: ${{ matrix.os }} - steps: - - uses: actions/checkout@v2 - - - name: init - run: | - sudo apt-get update - - - name: cc --version - run: echo && type cc && which cc && cc --version - - - name: gcc --version - run: echo && type gcc && which gcc && gcc --version - - - name: clang --version - run: echo && type clang && which clang && clang --version - - - name: make -v - run: echo && type make && which make && make -v - - - name: g++ --version - run: echo && type g++ && which g++ && g++ --version - - - name: git --version - run: echo && type git && which git && git --version - - - name: gcc packages (apt-cache) - run: apt-cache search gcc | grep "^gcc-[0-9\.]* " | sort - - - name: lib32gcc packages for i386 (apt-cache) - run: apt-cache search lib32gcc | grep "^lib32gcc-" | sort - - - name: libx32gcc packages for x32 (apt-cache) - run: apt-cache search libx32gcc | grep "^libx32gcc-" | sort - - - name: gcc multilib packages (apt-cache) - run: apt-cache search multilib | grep "gcc-" | sort - - - name: clang packages (apt-cache) - run: apt-cache search clang | grep "^clang-[0-9\.]* " | sort - - - name: QEMU packages (apt-cache) - run: apt-cache search qemu | grep "^qemu-system-.*QEMU full system" | sort diff --git a/librocksdb-sys/lz4/.gitignore b/librocksdb-sys/lz4/.gitignore deleted file mode 100644 index ed02057..0000000 --- a/librocksdb-sys/lz4/.gitignore +++ /dev/null @@ -1,43 +0,0 @@ -# Object files -*.o -*.ko - -# Libraries -*.lib -*.a - -# Shared objects (inc. 
Windows DLLs) -*.dll -*.so -*.so.* -*.dylib -*.dSYM # apple - -# Executables -*.exe -*.out -*.app -lz4 - -# IDE / editors files -.clang_complete -_codelite/ -_codelite_lz4/ -bin/ -*.zip -*.swp - -# analyzers -infer-out - -# Mac -.DS_Store -*.dSYM - -# Windows / Msys -nul -ld.exe* - -# test artifacts -*.lz4 -tmp* diff --git a/librocksdb-sys/lz4/.travis.yml b/librocksdb-sys/lz4/.travis.yml deleted file mode 100644 index 0aeea6e..0000000 --- a/librocksdb-sys/lz4/.travis.yml +++ /dev/null @@ -1,42 +0,0 @@ -language: c - -matrix: - fast_finish: true - include: - - - name: aarch64 real-hw tests - arch: arm64 - script: - - make test - - - name: PPC64LE real-hw tests - arch: ppc64le - script: - - make test - - - name: IBM s390x real-hw tests - arch: s390x - script: - - make test - - # tag-specific test - - name: tag build - if: tag =~ ^v[0-9]\.[0-9] - os: linux - script: - - make -C tests checkTag - - tests/checkTag "$TRAVIS_BRANCH" - - # oss-fuzz compilation test - - name: Compile OSS-Fuzz targets - script: - - ./ossfuzz/travisoss.sh - - # Unicode lint - # See https://github.com/lz4/lz4/issues/1018 - - name: Run Unicode lint - script: - - ./tests/unicode_lint.sh - - allow_failures: - - env: ALLOW_FAILURES=true diff --git a/librocksdb-sys/lz4/INSTALL b/librocksdb-sys/lz4/INSTALL deleted file mode 100644 index 6aab067..0000000 --- a/librocksdb-sys/lz4/INSTALL +++ /dev/null @@ -1,16 +0,0 @@ -Installation -============= - -``` -make -make install # this command may require root access -``` - -LZ4's `Makefile` supports standard [Makefile conventions], -including [staged installs], [redirection], or [command redefinition]. -It is compatible with parallel builds (`-j#`). - -[Makefile conventions]: https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html -[staged installs]: https://www.gnu.org/prep/standards/html_node/DESTDIR.html -[redirection]: https://www.gnu.org/prep/standards/html_node/Directory-Variables.html -[command redefinition]: https://www.gnu.org/prep/standards/html_node/Utilities-in-Makefiles.html diff --git a/librocksdb-sys/lz4/LICENSE b/librocksdb-sys/lz4/LICENSE deleted file mode 100644 index 1b84cc3..0000000 --- a/librocksdb-sys/lz4/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -This repository uses 2 different licenses : -- all files in the `lib` directory use a BSD 2-Clause license -- all other files use a GPLv2 license, unless explicitly stated otherwise - -Relevant license is reminded at the top of each source file, -and with presence of COPYING or LICENSE file in associated directories. - -This model is selected to emphasize that -files in the `lib` directory are designed to be included into 3rd party applications, -while all other files, in `programs`, `tests` or `examples`, -are intended to be used "as is", as part of their intended scenarios, -with no intention to support 3rd party integration use cases. diff --git a/librocksdb-sys/lz4/Makefile b/librocksdb-sys/lz4/Makefile deleted file mode 100644 index e70c3db..0000000 --- a/librocksdb-sys/lz4/Makefile +++ /dev/null @@ -1,279 +0,0 @@ -# ################################################################ -# LZ4 - Makefile -# Copyright (C) Yann Collet 2011-2020 -# All rights reserved. -# -# BSD license -# Redistribution and use in source and binary forms, with or without modification, -# are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. 
-# -# * Redistributions in binary form must reproduce the above copyright notice, this -# list of conditions and the following disclaimer in the documentation and/or -# other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# You can contact the author at : -# - LZ4 source repository : https://github.com/lz4/lz4 -# - LZ4 forum froup : https://groups.google.com/forum/#!forum/lz4c -# ################################################################ - -LZ4DIR = lib -PRGDIR = programs -TESTDIR = tests -EXDIR = examples -FUZZDIR = ossfuzz - -include Makefile.inc - - -.PHONY: default -default: lib-release lz4-release - -# silent mode by default; verbose can be triggered by V=1 or VERBOSE=1 -$(V)$(VERBOSE).SILENT: - -.PHONY: all -all: allmost examples manuals build_tests - -.PHONY: allmost -allmost: lib lz4 - -.PHONY: lib lib-release liblz4.a -lib: liblz4.a -lib lib-release liblz4.a: - $(MAKE) -C $(LZ4DIR) $@ - -.PHONY: lz4 lz4-release -lz4 : liblz4.a -lz4-release : lib-release -lz4 lz4-release : - $(MAKE) -C $(PRGDIR) $@ - cp $(PRGDIR)/lz4$(EXT) . - -.PHONY: examples -examples: liblz4.a - $(MAKE) -C $(EXDIR) all - -.PHONY: manuals -manuals: - $(MAKE) -C contrib/gen_manual $@ - -.PHONY: build_tests -build_tests: - $(MAKE) -C $(TESTDIR) all - -.PHONY: clean -clean: - $(MAKE) -C $(LZ4DIR) $@ > $(VOID) - $(MAKE) -C $(PRGDIR) $@ > $(VOID) - $(MAKE) -C $(TESTDIR) $@ > $(VOID) - $(MAKE) -C $(EXDIR) $@ > $(VOID) - $(MAKE) -C $(FUZZDIR) $@ > $(VOID) - $(MAKE) -C contrib/gen_manual $@ > $(VOID) - $(RM) lz4$(EXT) - $(RM) -r $(CMAKE_BUILD_DIR) - @echo Cleaning completed - - -#----------------------------------------------------------------------------- -# make install is validated only for Posix environments -#----------------------------------------------------------------------------- -ifeq ($(POSIX_ENV),Yes) -HOST_OS = POSIX - -.PHONY: install uninstall -install uninstall: - $(MAKE) -C $(LZ4DIR) $@ - $(MAKE) -C $(PRGDIR) $@ - -.PHONY: travis-install -travis-install: - $(MAKE) -j1 install DESTDIR=~/install_test_dir - -endif # POSIX_ENV - - -CMAKE ?= cmake -CMAKE_BUILD_DIR ?= build/cmake/build -ifneq (,$(filter MSYS%,$(shell $(UNAME)))) -HOST_OS = MSYS -CMAKE_PARAMS = -G"MSYS Makefiles" -endif - -.PHONY: cmake -cmake: - mkdir -p $(CMAKE_BUILD_DIR) - cd $(CMAKE_BUILD_DIR); $(CMAKE) $(CMAKE_PARAMS) ..; $(CMAKE) --build . 
- - -#------------------------------------------------------------------------ -# make tests validated only for MSYS and Posix environments -#------------------------------------------------------------------------ -ifneq (,$(filter $(HOST_OS),MSYS POSIX)) - -.PHONY: list -list: - $(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | xargs - -.PHONY: check -check: - $(MAKE) -C $(TESTDIR) test-lz4-essentials - -.PHONY: test -test: - $(MAKE) -C $(TESTDIR) $@ - $(MAKE) -C $(EXDIR) $@ - -.PHONY: clangtest -clangtest: CFLAGS += -Werror -Wconversion -Wno-sign-conversion -clangtest: CC = clang -clangtest: clean - $(CC) -v - $(MAKE) -C $(LZ4DIR) all CC=$(CC) - $(MAKE) -C $(PRGDIR) all CC=$(CC) - $(MAKE) -C $(TESTDIR) all CC=$(CC) - -.PHONY: clangtest-native -clangtest-native: CFLAGS = -O3 -Werror -Wconversion -Wno-sign-conversion -clangtest-native: clean - clang -v - $(MAKE) -C $(LZ4DIR) all CC=clang - $(MAKE) -C $(PRGDIR) native CC=clang - $(MAKE) -C $(TESTDIR) native CC=clang - -.PHONY: usan -usan: CC = clang -usan: CFLAGS = -O3 -g -fsanitize=undefined -fno-sanitize-recover=undefined -fsanitize-recover=pointer-overflow -usan: LDFLAGS = $(CFLAGS) -usan: clean - CC=$(CC) CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS)' $(MAKE) test FUZZER_TIME="-T30s" NB_LOOPS=-i1 - -.PHONY: usan32 -usan32: CFLAGS = -m32 -O3 -g -fsanitize=undefined -usan32: LDFLAGS = $(CFLAGS) -usan32: clean - $(MAKE) test FUZZER_TIME="-T30s" NB_LOOPS=-i1 - -SCANBUILD ?= scan-build -SCANBUILD_FLAGS += --status-bugs -v --force-analyze-debug-code -.PHONY: staticAnalyze -staticAnalyze: clean - CPPFLAGS=-DLZ4_DEBUG=1 CFLAGS=-g $(SCANBUILD) $(SCANBUILD_FLAGS) $(MAKE) all V=1 DEBUGLEVEL=1 - -.PHONY: cppcheck -cppcheck: - cppcheck . 
--force --enable=warning,portability,performance,style --error-exitcode=1 > /dev/null - -.PHONY: platformTest -platformTest: clean - @echo "\n ---- test lz4 with $(CC) compiler ----" - $(CC) -v - CFLAGS="-O3 -Werror" $(MAKE) -C $(LZ4DIR) all - CFLAGS="-O3 -Werror -static" $(MAKE) -C $(PRGDIR) all - CFLAGS="-O3 -Werror -static" $(MAKE) -C $(TESTDIR) all - $(MAKE) -C $(TESTDIR) test-platform - -.PHONY: versionsTest -versionsTest: clean - $(MAKE) -C $(TESTDIR) $@ - -.PHONY: test-freestanding -test-freestanding: - $(MAKE) -C $(TESTDIR) clean $@ - -.PHONY: cxxtest cxx32test -cxxtest cxx32test: CC := "$(CXX) -Wno-deprecated" -cxxtest cxx32test: CFLAGS = -O3 -Wall -Wextra -Wundef -Wshadow -Wcast-align -Werror -cxx32test: CFLAGS += -m32 -cxxtest cxx32test: clean - $(CXX) -v - CC=$(CC) $(MAKE) -C $(LZ4DIR) all CFLAGS="$(CFLAGS)" - CC=$(CC) $(MAKE) -C $(PRGDIR) all CFLAGS="$(CFLAGS)" - CC=$(CC) $(MAKE) -C $(TESTDIR) all CFLAGS="$(CFLAGS)" - -.PHONY: cxx17build -cxx17build : CC = "$(CXX) -Wno-deprecated" -cxx17build : CFLAGS = -std=c++17 -Wall -Wextra -Wundef -Wshadow -Wcast-align -Werror -pedantic -cxx17build : clean - $(CXX) -v - CC=$(CC) $(MAKE) -C $(LZ4DIR) all CFLAGS="$(CFLAGS)" - CC=$(CC) $(MAKE) -C $(PRGDIR) all CFLAGS="$(CFLAGS)" - CC=$(CC) $(MAKE) -C $(TESTDIR) all CFLAGS="$(CFLAGS)" - -.PHONY: ctocpptest -ctocpptest: LIBCC="$(CC)" -ctocpptest: TESTCC="$(CXX)" -ctocpptest: CFLAGS= -ctocpptest: clean - CC=$(LIBCC) $(MAKE) -C $(LZ4DIR) CFLAGS="$(CFLAGS)" all - CC=$(LIBCC) $(MAKE) -C $(TESTDIR) CFLAGS="$(CFLAGS)" lz4.o lz4hc.o lz4frame.o - CC=$(TESTCC) $(MAKE) -C $(TESTDIR) CFLAGS="$(CFLAGS)" all - -.PHONY: c_standards -c_standards: clean c_standards_c11 c_standards_c99 c_standards_c90 - -.PHONY: c_standards_c90 -c_standards_c90: clean - $(MAKE) clean; CFLAGS="-std=c90 -Werror -pedantic -Wno-long-long -Wno-variadic-macros" $(MAKE) allmost - $(MAKE) clean; CFLAGS="-std=gnu90 -Werror -pedantic -Wno-long-long -Wno-variadic-macros" $(MAKE) allmost - -.PHONY: c_standards_c99 -c_standards_c99: clean - $(MAKE) clean; CFLAGS="-std=c99 -Werror -pedantic" $(MAKE) all - $(MAKE) clean; CFLAGS="-std=gnu99 -Werror -pedantic" $(MAKE) all - -.PHONY: c_standards_c11 -c_standards_c11: clean - $(MAKE) clean; CFLAGS="-std=c11 -Werror" $(MAKE) all - -# The following test ensures that standard Makefile variables set through environment -# are correctly transmitted at compilation stage. -# This test is meant to detect issues like https://github.com/lz4/lz4/issues/958 -.PHONY: standard_variables -standard_variables: clean - @echo ================= - @echo Check support of Makefile Standard variables through environment - @echo note : this test requires V=1 to work properly - @echo ================= - CC="cc -DCC_TEST" \ - CFLAGS=-DCFLAGS_TEST \ - CPPFLAGS=-DCPPFLAGS_TEST \ - LDFLAGS=-DLDFLAGS_TEST \ - LDLIBS=-DLDLIBS_TEST \ - $(MAKE) V=1 > tmpsv - # Note: just checking the presence of custom flags - # would not detect situations where custom flags are - # supported in some part of the Makefile, and missed in others. - # So the test checks if they are present the _right nb of times_. - # However, checking static quantities makes this test brittle, - # because quantities (7, 2 and 1) can still evolve in future, - # for example when source directories or Makefile evolve. 
- if [ $$(grep CC_TEST tmpsv | wc -l) -ne 7 ]; then \ - echo "CC environment variable missed" && False; fi - if [ $$(grep CFLAGS_TEST tmpsv | wc -l) -ne 7 ]; then \ - echo "CFLAGS environment variable missed" && False; fi - if [ $$(grep CPPFLAGS_TEST tmpsv | wc -l) -ne 7 ]; then \ - echo "CPPFLAGS environment variable missed" && False; fi - if [ $$(grep LDFLAGS_TEST tmpsv | wc -l) -ne 2 ]; then \ - echo "LDFLAGS environment variable missed" && False; fi - if [ $$(grep LDLIBS_TEST tmpsv | wc -l) -ne 1 ]; then \ - echo "LDLIBS environment variable missed" && False; fi - @echo ================= - @echo all custom variables detected - @echo ================= - $(RM) tmpsv - -endif # MSYS POSIX diff --git a/librocksdb-sys/lz4/Makefile.inc b/librocksdb-sys/lz4/Makefile.inc deleted file mode 100644 index e78298c..0000000 --- a/librocksdb-sys/lz4/Makefile.inc +++ /dev/null @@ -1,111 +0,0 @@ -# ################################################################ -# LZ4 - Makefile common definitions -# Copyright (C) Yann Collet 2020 -# All rights reserved. -# -# BSD license -# Redistribution and use in source and binary forms, with or without modification, -# are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, this -# list of conditions and the following disclaimer in the documentation and/or -# other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-# -# You can contact the author at : -# - LZ4 source repository : https://github.com/lz4/lz4 -# - LZ4 forum froup : https://groups.google.com/forum/#!forum/lz4c -# ################################################################ - -UNAME ?= uname - -TARGET_OS ?= $(shell $(UNAME)) -ifeq ($(TARGET_OS),) - TARGET_OS ?= $(OS) -endif - -ifneq (,$(filter Windows%,$(TARGET_OS))) -LIBLZ4 = liblz4-$(LIBVER_MAJOR) -LIBLZ4_EXP = liblz4.lib -WINBASED = yes -else -LIBLZ4_EXP = liblz4.dll.a - ifneq (,$(filter MINGW%,$(TARGET_OS))) -LIBLZ4 = liblz4 -WINBASED = yes - else - ifneq (,$(filter MSYS%,$(TARGET_OS))) -LIBLZ4 = msys-lz4-$(LIBVER_MAJOR) -WINBASED = yes - else - ifneq (,$(filter CYGWIN%,$(TARGET_OS))) -LIBLZ4 = cyglz4-$(LIBVER_MAJOR) -WINBASED = yes - else -LIBLZ4 = liblz4.$(SHARED_EXT_VER) -WINBASED = no -EXT = - endif - endif - endif -endif - -ifeq ($(WINBASED),yes) -EXT = .exe -WINDRES = windres -endif - -#determine if dev/nul based on host environment -ifneq (,$(filter MINGW% MSYS% CYGWIN%,$(shell $(UNAME)))) -VOID := /dev/null -else - ifneq (,$(filter Windows%,$(OS))) -VOID := nul - else -VOID := /dev/null - endif -endif - -ifneq (,$(filter Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS Haiku MidnightBSD MINGW% CYGWIN% MSYS%,$(shell $(UNAME)))) -POSIX_ENV = Yes -else -POSIX_ENV = No -endif - -# Avoid symlinks when targeting Windows or building on a Windows host -ifeq ($(WINBASED),yes) -LN_SF = cp -p -else - ifneq (,$(filter MINGW% MSYS% CYGWIN%,$(shell $(UNAME)))) -LN_SF = cp -p - else - ifneq (,$(filter Windows%,$(OS))) -LN_SF = cp -p - else -LN_SF = ln -sf - endif - endif -endif - -ifneq (,$(filter $(shell $(UNAME)),SunOS)) -INSTALL ?= ginstall -else -INSTALL ?= install -endif - -INSTALL_PROGRAM ?= $(INSTALL) -m 755 -INSTALL_DATA ?= $(INSTALL) -m 644 -INSTALL_DIR ?= $(INSTALL) -d -m 755 diff --git a/librocksdb-sys/lz4/NEWS b/librocksdb-sys/lz4/NEWS deleted file mode 100644 index 0a56992..0000000 --- a/librocksdb-sys/lz4/NEWS +++ /dev/null @@ -1,341 +0,0 @@ -v1.9.4 -perf : faster decoding speed (~+20%) on aarch64 platforms -perf : faster decoding speed (~+70%) for -BD4 setting in CLI -api : new function `LZ4_decompress_safe_partial_usingDict()` by @yawqi -api : lz4frame: ability to provide custom allocators at state creation -api : can skip checksum validation for improved decoding speed -api : new experimental unit `lz4file` for file i/o API, by @anjiahao1 -api : new experimental function `LZ4F_uncompressedUpdate()`, by @alexmohr -cli : `--list` works on `stdin` input, by @Low-power -cli : `--no-crc` does not produce (compression) nor check (decompression) checksums -cli : fix: `--test` and `--list` produce an error code when parsing invalid input -cli : fix: support skippable frames when passed via `stdin`, reported by @davidmankin -build: fix: Makefile respects CFLAGS directives passed via environment variable -build: `LZ4_FREESTANDING`, new build macro for freestanding environments, by @t-mat -build: `make` and `make test` are compatible with `-j` parallel run -build: AS/400 compatibility, by @jonrumsey -build: Solaris 10 compatibility, by @pekdon -build: MSVC 2022 support, by @t-mat -build: improved meson script, by @eli-schwartz -doc : Updated LZ4 block format, provide an "implementation notes" section - -v1.9.3 -perf: highly improved speed in kernel space, by @terrelln -perf: faster speed with Visual Studio, thanks to @wolfpld and @remittor -perf: improved dictionary compression speed, by @felixhandte -perf: fixed LZ4_compress_HC_destSize() ratio, detected by 
@hsiangkao -perf: reduced stack usage in high compression mode, by @Yanpas -api : LZ4_decompress_safe_partial() supports unknown compressed size, requested by @jfkthame -api : improved LZ4F_compressBound() with automatic flushing, by Christopher Harvie -api : can (de)compress to/from NULL without UBs -api : fix alignment test on 32-bit systems (state initialization) -api : fix LZ4_saveDictHC() in corner case scenario, detected by @IgorKorkin -cli : `-l` legacy format is now compatible with `-m` multiple files, by Filipe Calasans -cli : benchmark mode supports dictionary, by @rkoradi -cli : fix --fast with large argument, detected by @picoHz -build: link to user-defined memory functions with LZ4_USER_MEMORY_FUNCTIONS, suggested by Yuriy Levchenko -build: contrib/cmake_unofficial/ moved to build/cmake/ -build: visual/* moved to build/ -build: updated meson script, by @neheb -build: tinycc support, by Anton Kochkov -install: Haiku support, by Jerome Duval -doc : updated LZ4 frame format, clarify EndMark - -v1.9.2 -fix : out-of-bound read in exceptional circumstances when using decompress_partial(), by @terrelln -fix : slim opportunity for out-of-bound write with compress_fast() with a large enough input and when providing an output smaller than recommended (< LZ4_compressBound(inputSize)), by @terrelln -fix : rare data corruption bug with LZ4_compress_destSize(), by @terrelln -fix : data corruption bug when Streaming with an Attached Dict in HC Mode, by @felixhandte -perf: enable LZ4_FAST_DEC_LOOP on aarch64/GCC by default, by @prekageo -perf: improved lz4frame streaming API speed, by @dreambottle -perf: speed up lz4hc on slow patterns when using external dictionary, by @terrelln -api: better in-place decompression and compression support -cli : --list supports multi-frames files, by @gstedman -cli: --version outputs to stdout -cli : add option --best as an alias of -12 , by @Low-power -misc: Integration into oss-fuzz by @cmeister2, expanded list of scenarios by @terrelln - -v1.9.1 -fix : decompression functions were reading a few bytes beyond input size (introduced in v1.9.0, reported by @ppodolsky and @danlark1) -api : fix : lz4frame initializers compatibility with c++, reported by @degski -cli : added command --list, based on a patch by @gabrielstedman -build: improved Windows build, by @JPeterMugaas -build: AIX, by Norman Green - -v1.9.0 -perf: large decompression speed improvement on x86/x64 (up to +20%) by @djwatson -api : changed : _destSize() compression variants are promoted to stable API -api : new : LZ4_initStream(HC), replacing LZ4_resetStream(HC) -api : changed : LZ4_resetStream(HC) as recommended reset function, for better performance on small data -cli : support custom block sizes, by @blezsan -build: source code can be amalgamated, by Bing Xu -build: added meson build, by @lzutao -build: new build macros : LZ4_DISTANCE_MAX, LZ4_FAST_DEC_LOOP -install: MidnightBSD, by @laffer1 -install: msys2 on Windows 10, by @vtorri - -v1.8.3 -perf: minor decompression speed improvement (~+2%) with gcc -fix : corruption in v1.8.2 at level 9 for files > 64KB under rare conditions (#560) -cli : new command --fast, by @jennifermliu -cli : fixed elapsed time, and added cpu load indicator (on -vv) (#555) -api : LZ4_decompress_safe_partial() now decodes exactly the nb of bytes requested (feature request #566) -build : added Haiku target, by @fbrosson, and MidnightBSD, by @laffer1 -doc : updated documentation regarding dictionary compression - -v1.8.2 -perf: *much* faster dictionary compression on 
small files, by @felixhandte -perf: improved decompression speed and binary size, by Alexey Tourbin (@svpv) -perf: slightly faster HC compression and decompression speed -perf: very small compression ratio improvement -fix : compression compatible with low memory addresses (< 0xFFFF) -fix : decompression segfault when provided with NULL input, by @terrelln -cli : new command --favor-decSpeed -cli : benchmark mode more accurate for small inputs -fullbench : can bench _destSize() variants, by @felixhandte -doc : clarified block format parsing restrictions, by Alexey Tourbin (@svpv) - -v1.8.1 -perf : faster and stronger ultra modes (levels 10+) -perf : slightly faster compression and decompression speed -perf : fix bad degenerative case, reported by @c-morgenstern -fix : decompression failed when using a combination of extDict + low memory address (#397), reported and fixed by Julian Scheid (@jscheid) -cli : support for dictionary compression (`-D`), by Felix Handte @felixhandte -cli : fix : `lz4 -d --rm` preserves timestamp (#441) -cli : fix : do not modify /dev/null permission as root, by @aliceatlas -api : `_destSize()` variant supported for all compression levels -build : `make` and `make test` compatible with `-jX`, reported by @mwgamera -build : can control LZ4LIB_VISIBILITY macro, by @mikir -install: fix man page directory (#387), reported by Stuart Cardall (@itoffshore) - -v1.8.0 -cli : fix : do not modify /dev/null permissions, reported by @Maokaman1 -cli : added GNU separator -- specifying that all following arguments are files -API : added LZ4_compress_HC_destSize(), by Oleg (@remittor) -API : added LZ4F_resetDecompressionContext() -API : lz4frame : negative compression levels trigger fast acceleration, request by Lawrence Chan -API : lz4frame : can control block checksum and dictionary ID -API : fix : expose obsolete decoding functions, reported by Chen Yufei -API : experimental : lz4frame_static : new dictionary compression API -build : fix : static lib installation, by Ido Rosen -build : dragonFlyBSD, OpenBSD, NetBSD supported -build : LZ4_MEMORY_USAGE can be modified at compile time, through external define -doc : Updated LZ4 Frame format to v1.6.0, restoring Dictionary-ID field -doc : lz4 api manual, by Przemyslaw Skibinski - -v1.7.5 -lz4hc : new high compression mode : levels 10-12 compress more and slower, by Przemyslaw Skibinski -lz4cat : fix : works with relative path (#284) and stdin (#285) (reported by @beiDei8z) -cli : fix minor notification when using -r recursive mode -API : lz4frame : LZ4F_frameBound(0) gives upper bound of *flush() and *End() operations (#290, #280) -doc : markdown version of man page, by Takayuki Matsuoka (#279) -build : Makefile : fix make -jX lib+exe concurrency (#277) -build : cmake : improvements by Michał Górny (#296) - -v1.7.4.2 -fix : Makefile : release build compatible with PIE and customized compilation directives provided through environment variables (#274, reported by Antoine Martin) - -v1.7.4 -Improved : much better speed in -mx32 mode -cli : fix : Large file support in 32-bits mode on Mac OS-X -fix : compilation on gcc 4.4 (#272), reported by Antoine Martin - -v1.7.3 -Changed : moved to versioning; package, cli and library have same version number -Improved: Small decompression speed boost -Improved: Small compression speed improvement on 64-bits systems -Improved: Small compression ratio and speed improvement on small files -Improved: Significant speed boost on ARMv6 and ARMv7 -Fix : better ratio on 64-bits big-endian targets 
-Improved cmake build script, by Evan Nemerson -New liblz4-dll project, by Przemyslaw Skibinki -Makefile: Generates object files (*.o) for faster (re)compilation on low power systems -cli : new : --rm and --help commands -cli : new : preserved file attributes, by Przemyslaw Skibinki -cli : fix : crash on some invalid inputs -cli : fix : -t correctly validates lz4-compressed files, by Nick Terrell -cli : fix : detects and reports fread() errors, thanks to Hiroshi Fujishima report #243 -cli : bench : new : -r recursive mode -lz4cat : can cat multiple files in a single command line (#184) -Added : doc/lz4_manual.html, by Przemyslaw Skibinski -Added : dictionary compression and frame decompression examples, by Nick Terrell -Added : Debianization, by Evgeniy Polyakov - -r131 -New : Dos/DJGPP target, thanks to Louis Santillan (#114) -Added : Example using lz4frame library, by Zbigniew Jędrzejewski-Szmek (#118) -Changed: xxhash symbols are modified (namespace emulation) within liblz4 - -r130: -Fixed : incompatibility sparse mode vs console, reported by Yongwoon Cho (#105) -Fixed : LZ4IO exits too early when frame crc not present, reported by Yongwoon Cho (#106) -Fixed : incompatibility sparse mode vs append mode, reported by Takayuki Matsuoka (#110) -Performance fix : big compression speed boost for clang (+30%) -New : cross-version test, by Takayuki Matsuoka - -r129: -Added : LZ4_compress_fast(), LZ4_compress_fast_continue() -Added : LZ4_compress_destSize() -Changed: New lz4 and lz4hc compression API. Previous function prototypes still supported. -Changed: Sparse file support enabled by default -New : LZ4 CLI improved performance compressing/decompressing multiple files (#86, kind contribution from Kyle J. Harper & Takayuki Matsuoka) -Fixed : GCC 4.9+ optimization bug - Reported by Markus Trippelsdorf, Greg Slazinski & Evan Nemerson -Changed: Enums converted to LZ4F_ namespace convention - by Takayuki Matsuoka -Added : AppVeyor CI environment, for Visual tests - Suggested by Takayuki Matsuoka -Modified:Obsolete functions generate warnings - Suggested by Evan Nemerson, contributed by Takayuki Matsuoka -Fixed : Bug #75 (unfinished stream), reported by Yongwoon Cho -Updated: Documentation converted to MarkDown format - -r128: -New : lz4cli sparse file support (Requested by Neil Wilson, and contributed by Takayuki Matsuoka) -New : command -m, to compress multiple files in a single command (suggested by Kyle J. 
Harper) -Fixed : Restored lz4hc compression ratio (slightly lower since r124) -New : lz4 cli supports long commands (suggested by Takayuki Matsuoka) -New : lz4frame & lz4cli frame content size support -New : lz4frame supports skippable frames, as requested by Sergey Cherepanov -Changed: Default "make install" directory is /usr/local, as notified by Ron Johnson -New : lz4 cli supports "pass-through" mode, requested by Neil Wilson -New : datagen can generate sparse files -New : scan-build tests, thanks to kind help by Takayuki Matsuoka -New : g++ compatibility tests -New : arm cross-compilation test, thanks to kind help by Takayuki Matsuoka -Fixed : Fuzzer + frametest compatibility with NetBSD (issue #48, reported by Thomas Klausner) -Added : Visual project directory -Updated: Man page & Specification - -r127: -N/A : added a file on SVN - -r126: -New : lz4frame API is now integrated into liblz4 -Fixed : GCC 4.9 bug on highest performance settings, reported by Greg Slazinski -Fixed : bug within LZ4 HC streaming mode, reported by James Boyle -Fixed : older compiler don't like nameless unions, reported by Cheyi Lin -Changed : lz4 is C90 compatible -Changed : added -pedantic option, fixed a few mminor warnings - -r125: -Changed : endian and alignment code -Changed : directory structure : new "lib" directory -Updated : lz4io, now uses lz4frame -Improved: slightly improved decoding speed -Fixed : LZ4_compress_limitedOutput(); Special thanks to Christopher Speller ! -Fixed : some alignment warnings under clang -Fixed : deprecated function LZ4_slideInputBufferHC() - -r124: -New : LZ4 HC streaming mode -Fixed : LZ4F_compressBound() using null preferencesPtr -Updated : xxHash to r38 -Updated library number, to 1.4.0 - -r123: -Added : experimental lz4frame API, thanks to Takayuki Matsuoka and Christopher Jackson for testings -Fix : s390x support, thanks to Nobuhiro Iwamatsu -Fix : test mode (-t) no longer requires confirmation, thanks to Thary Nguyen - -r122: -Fix : AIX & AIX64 support (SamG) -Fix : mips 64-bits support (lew van) -Added : Examples directory, using code examples from Takayuki Matsuoka -Updated : Framing specification, to v1.4.1 -Updated : xxHash, to r36 - -r121: -Added : Makefile : install for kFreeBSD and Hurd (Nobuhiro Iwamatsu) -Fix : Makefile : install for OS-X and BSD, thanks to Takayuki Matsuoka - -r120: -Modified : Streaming API, using strong types -Added : LZ4_versionNumber(), thanks to Takayuki Matsuoka -Fix : OS-X : library install name, thanks to Clemens Lang -Updated : Makefile : synchronize library version number with lz4.h, thanks to Takayuki Matsuoka -Updated : Makefile : stricter compilation flags -Added : pkg-config, thanks to Zbigniew Jędrzejewski-Szmek (issue 135) -Makefile : lz4-test only test native binaries, as suggested by Michał Górny (issue 136) -Updated : xxHash to r35 - -r119: -Fix : Issue 134 : extended malicious address space overflow in 32-bits mode for some specific configurations - -r118: -New : LZ4 Streaming API (Fast version), special thanks to Takayuki Matsuoka -New : datagen : parametrable synthetic data generator for tests -Improved : fuzzer, support more test cases, more parameters, ability to jump to specific test -fix : support ppc64le platform (issue 131) -fix : Issue 52 (malicious address space overflow in 32-bits mode when using large custom format) -fix : Makefile : minor issue 130 : header files permissions - -r117: -Added : man pages for lz4c and lz4cat -Added : automated tests on Travis, thanks to Takayuki Matsuoka ! 
-fix : block-dependency command line (issue 127) -fix : lz4fullbench (issue 128) - -r116: -hotfix (issue 124 & 125) - -r115: -Added : lz4cat utility, installed on POSX systems (issue 118) -OS-X compatible compilation of dynamic library (issue 115) - -r114: -Makefile : library correctly compiled with -O3 switch (issue 114) -Makefile : library compilation compatible with clang -Makefile : library is versioned and linked (issue 119) -lz4.h : no more static inline prototypes (issue 116) -man : improved header/footer (issue 111) -Makefile : Use system default $(CC) & $(MAKE) variables (issue 112) -xxhash : updated to r34 - -r113: -Large decompression speed improvement for GCC 32-bits. Thanks to Valery Croizier ! -LZ4HC : Compression Level is now a programmable parameter (CLI from 4 to 9) -Separated IO routines from command line (lz4io.c) -Version number into lz4.h (suggested by Francesc Alted) - -r112: -quickfix - -r111 : -Makefile : added capability to install libraries -Modified Directory tree, to better separate libraries from programs. - -r110 : -lz4 & lz4hc : added capability to allocate state & stream state with custom allocator (issue 99) -fuzzer & fullbench : updated to test new functions -man : documented -l command (Legacy format, for Linux kernel compression) (issue 102) -cmake : improved version by Mika Attila, building programs and libraries (issue 100) -xxHash : updated to r33 -Makefile : clean also delete local package .tar.gz - -r109 : -lz4.c : corrected issue 98 (LZ4_compress_limitedOutput()) -Makefile : can specify version number from makefile - -r108 : -lz4.c : corrected compression efficiency issue 97 in 64-bits chained mode (-BD) for streams > 4 GB (thanks Roman Strashkin for reporting) - -r107 : -Makefile : support DESTDIR for staged installs. Thanks Jorge Aparicio. -Makefile : make install installs both lz4 and lz4c (Jorge Aparicio) -Makefile : removed -Wno-implicit-declaration compilation switch -lz4cli.c : include for isatty() (Luca Barbato) -lz4.h : introduced LZ4_MAX_INPUT_SIZE constant (Shay Green) -lz4.h : LZ4_compressBound() : unified macro and inline definitions (Shay Green) -lz4.h : LZ4_decompressSafe_partial() : clarify comments (Shay Green) -lz4.c : LZ4_compress() verify input size condition (Shay Green) -bench.c : corrected a bug in free memory size evaluation -cmake : install into bin/ directory (Richard Yao) -cmake : check for just C compiler (Elan Ruusamae) - -r106 : -Makefile : make dist modify text files in the package to respect Unix EoL convention -lz4cli.c : corrected small display bug in HC mode - -r105 : -Makefile : New install script and man page, contributed by Prasad Pandit -lz4cli.c : Minor modifications, for easier extensibility -COPYING : added license file -LZ4_Streaming_Format.odt : modified file name to remove white space characters -Makefile : .exe suffix now properly added only for Windows target diff --git a/librocksdb-sys/lz4/README.md b/librocksdb-sys/lz4/README.md deleted file mode 100644 index b314e69..0000000 --- a/librocksdb-sys/lz4/README.md +++ /dev/null @@ -1,120 +0,0 @@ -LZ4 - Extremely fast compression -================================ - -LZ4 is lossless compression algorithm, -providing compression speed > 500 MB/s per core, -scalable with multi-cores CPU. -It features an extremely fast decoder, -with speed in multiple GB/s per core, -typically reaching RAM speed limits on multi-core systems. - -Speed can be tuned dynamically, selecting an "acceleration" factor -which trades compression ratio for faster speed. 
-On the other end, a high compression derivative, LZ4_HC, is also provided, -trading CPU time for improved compression ratio. -All versions feature the same decompression speed. - -LZ4 is also compatible with [dictionary compression](https://github.com/facebook/zstd#the-case-for-small-data-compression), -both at [API](https://github.com/lz4/lz4/blob/v1.8.3/lib/lz4frame.h#L481) and [CLI](https://github.com/lz4/lz4/blob/v1.8.3/programs/lz4.1.md#operation-modifiers) levels. -It can ingest any input file as dictionary, though only the final 64KB are used. -This capability can be combined with the [Zstandard Dictionary Builder](https://github.com/facebook/zstd/blob/v1.3.5/programs/zstd.1.md#dictionary-builder), -in order to drastically improve compression performance on small files. - - -LZ4 library is provided as open-source software using BSD 2-Clause license. - - -|Branch |Status | -|------------|---------| -|dev | [![Build Status][travisDevBadge]][travisLink] [![Build status][AppveyorDevBadge]][AppveyorLink] | - -[travisDevBadge]: https://travis-ci.org/lz4/lz4.svg?branch=dev "Continuous Integration test suite" -[travisLink]: https://travis-ci.org/lz4/lz4 -[AppveyorDevBadge]: https://ci.appveyor.com/api/projects/status/github/lz4/lz4?branch=dev&svg=true "Windows test suite" -[AppveyorLink]: https://ci.appveyor.com/project/YannCollet/lz4-1lndh - - -Benchmarks -------------------------- - -The benchmark uses [lzbench], from @inikep -compiled with GCC v8.2.0 on Linux 64-bits (Ubuntu 4.18.0-17). -The reference system uses a Core i7-9700K CPU @ 4.9GHz (w/ turbo boost). -Benchmark evaluates the compression of reference [Silesia Corpus] -in single-thread mode. - -[lzbench]: https://github.com/inikep/lzbench -[Silesia Corpus]: http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia - -| Compressor | Ratio | Compression | Decompression | -| ---------- | ----- | ----------- | ------------- | -| memcpy | 1.000 | 13700 MB/s | 13700 MB/s | -|**LZ4 default (v1.9.0)** |**2.101**| **780 MB/s**| **4970 MB/s** | -| LZO 2.09 | 2.108 | 670 MB/s | 860 MB/s | -| QuickLZ 1.5.0 | 2.238 | 575 MB/s | 780 MB/s | -| Snappy 1.1.4 | 2.091 | 565 MB/s | 1950 MB/s | -| [Zstandard] 1.4.0 -1 | 2.883 | 515 MB/s | 1380 MB/s | -| LZF v3.6 | 2.073 | 415 MB/s | 910 MB/s | -| [zlib] deflate 1.2.11 -1| 2.730 | 100 MB/s | 415 MB/s | -|**LZ4 HC -9 (v1.9.0)** |**2.721**| 41 MB/s | **4900 MB/s** | -| [zlib] deflate 1.2.11 -6| 3.099 | 36 MB/s | 445 MB/s | - -[zlib]: http://www.zlib.net/ -[Zstandard]: http://www.zstd.net/ - -LZ4 is also compatible and optimized for x32 mode, -for which it provides additional speed performance. - - -Installation -------------------------- - -``` -make -make install # this command may require root permissions -``` - -LZ4's `Makefile` supports standard [Makefile conventions], -including [staged installs], [redirection], or [command redefinition]. -It is compatible with parallel builds (`-j#`). 
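
> Editor's illustration (not part of the deleted `README.md` above): the prose in that README describes the "acceleration" trade-off and dictionary/HC variants without showing the block API itself. Below is a minimal, hedged round-trip sketch using the stable one-shot block API (`LZ4_compressBound`, `LZ4_compress_fast`, `LZ4_decompress_safe`). The `demo.c` file name and the `cc demo.c -llz4` build line are assumptions about a system-installed liblz4, not something the repository prescribes.

```c
/* demo.c - minimal LZ4 block-API round trip (sketch; assumes liblz4 is installed).
 * Build: cc demo.c -llz4 -o demo */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "lz4.h"

int main(void)
{
    const char src[] = "LZ4 is a fast lossless compression algorithm. "
                       "LZ4 is a fast lossless compression algorithm.";
    const int srcSize     = (int)sizeof(src);              /* keep the trailing '\0' for simplicity */
    const int dstCapacity = LZ4_compressBound(srcSize);    /* worst-case compressed size */

    char* compressed = (char*)malloc((size_t)dstCapacity);
    char* restored   = (char*)malloc(sizeof(src));
    if (compressed == NULL || restored == NULL) return 1;

    /* acceleration > 1 trades ratio for speed, the knob the README text above describes */
    const int cSize = LZ4_compress_fast(src, compressed, srcSize, dstCapacity, /*acceleration*/ 4);
    if (cSize <= 0) return 1;

    const int dSize = LZ4_decompress_safe(compressed, restored, cSize, (int)sizeof(src));
    if (dSize != srcSize || memcmp(src, restored, (size_t)srcSize) != 0) return 1;

    printf("original: %d bytes, compressed: %d bytes\n", srcSize, cSize);
    free(compressed);
    free(restored);
    return 0;
}
```

> `LZ4_compress_default()` behaves like acceleration 1; higher values speed up compression at some cost in ratio, which is the dynamic tuning the README paragraph refers to.
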
- -[Makefile conventions]: https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html -[staged installs]: https://www.gnu.org/prep/standards/html_node/DESTDIR.html -[redirection]: https://www.gnu.org/prep/standards/html_node/Directory-Variables.html -[command redefinition]: https://www.gnu.org/prep/standards/html_node/Utilities-in-Makefiles.html - -### Building LZ4 - Using vcpkg - -You can download and install LZ4 using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager: - - git clone https://github.com/Microsoft/vcpkg.git - cd vcpkg - ./bootstrap-vcpkg.sh - ./vcpkg integrate install - vcpkg install lz4 - -The LZ4 port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository. - -Documentation -------------------------- - -The raw LZ4 block compression format is detailed within [lz4_Block_format]. - -Arbitrarily long files or data streams are compressed using multiple blocks, -for streaming requirements. These blocks are organized into a frame, -defined into [lz4_Frame_format]. -Interoperable versions of LZ4 must also respect the frame format. - -[lz4_Block_format]: doc/lz4_Block_format.md -[lz4_Frame_format]: doc/lz4_Frame_format.md - - -Other source versions -------------------------- - -Beyond the C reference source, -many contributors have created versions of lz4 in multiple languages -(Java, C#, Python, Perl, Ruby, etc.). -A list of known source ports is maintained on the [LZ4 Homepage]. - -[LZ4 Homepage]: http://www.lz4.org diff --git a/librocksdb-sys/lz4/appveyor.yml b/librocksdb-sys/lz4/appveyor.yml deleted file mode 100644 index b4c27ef..0000000 --- a/librocksdb-sys/lz4/appveyor.yml +++ /dev/null @@ -1,147 +0,0 @@ -version: 1.0.{build} -environment: - matrix: - - COMPILER: "gcc" - PLATFORM: "mingw64" - - COMPILER: "gcc" - PLATFORM: "mingw32" - - COMPILER: "visual" - CONFIGURATION: "Debug" - PLATFORM: "x64" - - COMPILER: "visual" - CONFIGURATION: "Debug" - PLATFORM: "Win32" - - COMPILER: "visual" - CONFIGURATION: "Release" - PLATFORM: "x64" - - COMPILER: "visual" - CONFIGURATION: "Release" - PLATFORM: "Win32" - - COMPILER: "gcc" - PLATFORM: "clang" - -install: - - ECHO Installing %COMPILER% %PLATFORM% %CONFIGURATION% - - MKDIR bin - - if [%COMPILER%]==[gcc] SET PATH_ORIGINAL=%PATH% - - if [%COMPILER%]==[gcc] ( - SET "PATH_MINGW32=c:\MinGW\bin;c:\MinGW\usr\bin" && - SET "PATH_MINGW64=c:\msys64\mingw64\bin;c:\msys64\usr\bin" && - COPY C:\MinGW\bin\mingw32-make.exe C:\MinGW\bin\make.exe && - COPY C:\MinGW\bin\gcc.exe C:\MinGW\bin\cc.exe - ) else ( - IF [%PLATFORM%]==[x64] (SET ADDITIONALPARAM=/p:LibraryPath="C:\Program Files\Microsoft SDKs\Windows\v7.1\lib\x64;c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\lib\amd64;C:\Program Files (x86)\Microsoft Visual Studio 10.0\;C:\Program Files (x86)\Microsoft Visual Studio 10.0\lib\amd64;") - ) - -build_script: - - if [%PLATFORM%]==[mingw32] SET PATH=%PATH_MINGW32%;%PATH_ORIGINAL% - - if [%PLATFORM%]==[mingw64] SET PATH=%PATH_MINGW64%;%PATH_ORIGINAL% - - if [%PLATFORM%]==[clang] SET PATH=%PATH_MINGW64%;%PATH_ORIGINAL% - - ECHO *** && - ECHO Building %COMPILER% %PLATFORM% %CONFIGURATION% && - ECHO *** - - if [%PLATFORM%]==[clang] (clang -v) - - if [%COMPILER%]==[gcc] (gcc -v) - - if [%COMPILER%]==[gcc] ( - echo ----- && - make -v && - echo ----- && - if not [%PLATFORM%]==[clang] ( - make -C programs lz4 && - make -C tests fullbench && - make -C tests 
fuzzer && - make -C lib lib V=1 - ) ELSE ( - make -C programs lz4 CC=clang MOREFLAGS="--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion" && - make -C tests fullbench CC=clang MOREFLAGS="--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion" && - make -C tests fuzzer CC=clang MOREFLAGS="--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion" && - make -C lib lib CC=clang MOREFLAGS="--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion" - ) - ) - - if [%COMPILER%]==[gcc] if not [%PLATFORM%]==[clang] ( - MKDIR bin\dll bin\static bin\example bin\include && - COPY tests\fullbench.c bin\example\ && - COPY lib\xxhash.c bin\example\ && - COPY lib\xxhash.h bin\example\ && - COPY lib\lz4.h bin\include\ && - COPY lib\lz4hc.h bin\include\ && - COPY lib\lz4frame.h bin\include\ && - COPY lib\liblz4.a bin\static\liblz4_static.lib && - COPY lib\dll\* bin\dll\ && - COPY lib\dll\example\Makefile bin\example\ && - COPY lib\dll\example\fullbench-dll.* bin\example\ && - COPY lib\dll\example\README.md bin\ && - COPY programs\lz4.exe bin\lz4.exe - ) - - if [%COMPILER%]==[gcc] if [%PLATFORM%]==[mingw64] ( - 7z.exe a -bb1 bin\lz4_x64.zip NEWS .\bin\lz4.exe .\bin\README.md .\bin\example .\bin\dll .\bin\static .\bin\include && - appveyor PushArtifact bin\lz4_x64.zip - ) - - if [%COMPILER%]==[gcc] if [%PLATFORM%]==[mingw32] ( - 7z.exe a -bb1 bin\lz4_x86.zip NEWS .\bin\lz4.exe .\bin\README.md .\bin\example .\bin\dll .\bin\static .\bin\include && - appveyor PushArtifact bin\lz4_x86.zip - ) - - if [%COMPILER%]==[gcc] (COPY tests\*.exe programs\) - - if [%COMPILER%]==[visual] ( - ECHO *** && - ECHO *** Building Visual Studio 2010 %PLATFORM%\%CONFIGURATION% && - ECHO *** && - msbuild "build\VS2010\lz4.sln" %ADDITIONALPARAM% /m /verbosity:minimal /property:PlatformToolset=v100 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /p:EnableWholeProgramOptimization=true /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - ECHO *** && - ECHO *** Building Visual Studio 2012 %PLATFORM%\%CONFIGURATION% && - ECHO *** && - msbuild "build\VS2010\lz4.sln" /m /verbosity:minimal /property:PlatformToolset=v110 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - ECHO *** && - ECHO *** Building Visual Studio 2013 %PLATFORM%\%CONFIGURATION% && - ECHO *** && - msbuild "build\VS2010\lz4.sln" /m /verbosity:minimal /property:PlatformToolset=v120 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - ECHO *** && - ECHO *** Building Visual Studio 2015 %PLATFORM%\%CONFIGURATION% && - ECHO *** && - msbuild "build\VS2010\lz4.sln" /m /verbosity:minimal /property:PlatformToolset=v140 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && - COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe programs\ - ) - -test_script: - - ECHO *** && - ECHO Testing %COMPILER% %PLATFORM% %CONFIGURATION% && - ECHO *** - - if not [%COMPILER%]==[unknown] ( - CD programs && - lz4 -h && - lz4 -i1b lz4.exe && - lz4 -i1b5 lz4.exe && - lz4 -i1b10 lz4.exe && - lz4 -i1b15 lz4.exe && - echo ------- lz4 tested ------- && - fullbench.exe -i1 fullbench.exe && - echo trying to launch fuzzer.exe && - fuzzer.exe -v -T30s - ) - -artifacts: - - path: bin\lz4_x64.zip - - path: bin\lz4_x86.zip 
- -deploy: -- provider: GitHub - artifact: bin\lz4_x64.zip - auth_token: - secure: w6UJaGie0qbZvffr/fqyhO/Vj8rMiQWnv9a8qm3gxfngdHDTMT42wYupqJpIExId - force_update: true - prerelease: true - on: - COMPILER: gcc - PLATFORM: "mingw64" - appveyor_repo_tag: true - -- provider: GitHub - artifact: bin\lz4_x86.zip - auth_token: - secure: w6UJaGie0qbZvffr/fqyhO/Vj8rMiQWnv9a8qm3gxfngdHDTMT42wYupqJpIExId - force_update: true - prerelease: true - on: - COMPILER: gcc - PLATFORM: "mingw32" - appveyor_repo_tag: true diff --git a/librocksdb-sys/lz4/build/.gitignore b/librocksdb-sys/lz4/build/.gitignore deleted file mode 100644 index 69e1111..0000000 --- a/librocksdb-sys/lz4/build/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -# Visual C++ -.vs/ -*Copy -*.db -*.opensdf -*.sdf -*.suo -*.user -ver*/ -VS2010/bin/ -VS2017/bin/ -ipch - -# Fixup for lz4 project directories -!VS2010/lz4 -!VS2017/lz4 diff --git a/librocksdb-sys/lz4/build/README.md b/librocksdb-sys/lz4/build/README.md deleted file mode 100644 index d416aeb..0000000 --- a/librocksdb-sys/lz4/build/README.md +++ /dev/null @@ -1,55 +0,0 @@ -Projects for various integrated development environments (IDEs) -============================================================== - -#### Included projects - -The following projects are included with the lz4 distribution: -- `cmake` - CMake project -- `VS2010` - Visual Studio 2010 project (which also works well with Visual Studio 2012, 2013, 2015) -- `VS2017` - Visual Studio 2017 project - - -#### How to compile lz4 with Visual Studio - -1. Install Visual Studio, e.g. VS 2015 Community Edition (it's free). -2. Download the latest version of lz4 from https://github.com/lz4/lz4/releases -3. Decompress the ZIP archive. -4. Go to the decompressed directory, then to `build`, then `VS2010`, and open `lz4.sln`. -5. Visual Studio will ask to convert the VS2010 project to the newer format; agree. -6. Change `Debug` to `Release` and, on 64-bit Windows, also change `Win32` to `x64`. -7. Press F7, or select `BUILD` from the menu bar and choose `Build Solution`. -8. If compilation succeeds, the compiled executable will be at `build\VS2010\bin\x64_Release\lz4.exe` - - -#### Projects available within lz4.sln - -The Visual Studio solution file `lz4.sln` contains many projects that will be compiled to the -`build\VS2010\bin\$(Platform)_$(Configuration)` directory. For example, `lz4` built for `x64` in -`Release` will be compiled to `build\VS2010\bin\x64_Release\lz4.exe`. The solution file contains the -following projects: - -- `lz4` : Command-line utility, supporting gzip-like arguments -- `datagen` : Synthetic and parameterizable data generator, for tests -- `frametest` : Test tool that checks lz4frame integrity on the target platform -- `fullbench` : Precisely measures the speed of each lz4 inner function -- `fuzzer` : Test tool to check lz4 integrity on the target platform -- `liblz4` : A static LZ4 library compiled to `liblz4_static.lib` -- `liblz4-dll` : A dynamic LZ4 library (DLL) compiled to `liblz4.dll` with the import library `liblz4.lib` -- `fullbench-dll` : The fullbench program compiled with the import library; the executable requires the LZ4 DLL - - -#### Using LZ4 DLL with Microsoft Visual C++ project - -The header files `lib\lz4.h`, `lib\lz4hc.h`, `lib\lz4frame.h` and the import library -`build\VS2010\bin\$(Platform)_$(Configuration)\liblz4.lib` are required to -compile a project using Visual C++. - -1. 
The path to header files should be added to `Additional Include Directories` that can - be found in Project Properties of Visual Studio IDE in the `C/C++` Property Pages on the `General` page. -2. The import library has to be added to `Additional Dependencies` that can - be found in Project Properties in the `Linker` Property Pages on the `Input` page. - If one will provide only the name `liblz4.lib` without a full path to the library - then the directory has to be added to `Linker\General\Additional Library Directories`. - -The compiled executable will require LZ4 DLL which is available at -`build\VS2010\bin\$(Platform)_$(Configuration)\liblz4.dll`. diff --git a/librocksdb-sys/lz4/build/VS2010/datagen/datagen.vcxproj b/librocksdb-sys/lz4/build/VS2010/datagen/datagen.vcxproj deleted file mode 100644 index e24f961..0000000 --- a/librocksdb-sys/lz4/build/VS2010/datagen/datagen.vcxproj +++ /dev/null @@ -1,169 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {D745AE2F-596A-403A-9B91-81A8C6779243} - Win32Proj - datagen - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - Unicode - - - Application - true - Unicode - - - Application - false - Unicode - true - - - Application - false - Unicode - true - - - - - - - - - - - - - - - - - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - Console - true - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - Console - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - Console - true - true - true - - - - - - - - - - - - - diff --git a/librocksdb-sys/lz4/build/VS2010/frametest/frametest.vcxproj b/librocksdb-sys/lz4/build/VS2010/frametest/frametest.vcxproj deleted file mode 100644 index 3196768..0000000 --- a/librocksdb-sys/lz4/build/VS2010/frametest/frametest.vcxproj +++ /dev/null @@ -1,176 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7} - Win32Proj - frametest - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - Unicode - - - Application - true - Unicode - - - Application - false - Unicode 
- true - - - Application - false - Unicode - true - - - - - - - - - - - - - - - - - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - Console - true - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - Console - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - Console - true - true - true - - - - - - - - - - - - - - - - - - - - diff --git a/librocksdb-sys/lz4/build/VS2010/fullbench-dll/fullbench-dll.vcxproj b/librocksdb-sys/lz4/build/VS2010/fullbench-dll/fullbench-dll.vcxproj deleted file mode 100644 index 8f503f5..0000000 --- a/librocksdb-sys/lz4/build/VS2010/fullbench-dll/fullbench-dll.vcxproj +++ /dev/null @@ -1,180 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {13992FD2-077E-4954-B065-A428198201A9} - Win32Proj - fullbench-dll - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - Unicode - - - Application - true - Unicode - - - Application - false - Unicode - true - - - Application - false - Unicode - true - - - - - - - - - - - - - - - - - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - Console - true - $(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - liblz4.lib;%(AdditionalDependencies) - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) - true - true - 
/analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - Console - true - $(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - liblz4.lib;%(AdditionalDependencies) - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - $(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - liblz4.lib;%(AdditionalDependencies) - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - Console - true - true - true - $(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - liblz4.lib;%(AdditionalDependencies) - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/librocksdb-sys/lz4/build/VS2010/fullbench/fullbench.vcxproj b/librocksdb-sys/lz4/build/VS2010/fullbench/fullbench.vcxproj deleted file mode 100644 index aa67431..0000000 --- a/librocksdb-sys/lz4/build/VS2010/fullbench/fullbench.vcxproj +++ /dev/null @@ -1,176 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E} - Win32Proj - fullbench - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - Unicode - - - Application - true - Unicode - - - Application - false - Unicode - true - - - Application - false - Unicode - true - - - - - - - - - - - - - - - - - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - Console - true - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - Console - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - Console - true - true - true - - - - - - - - - - - - - - - - - - - - diff --git a/librocksdb-sys/lz4/build/VS2010/fuzzer/fuzzer.vcxproj b/librocksdb-sys/lz4/build/VS2010/fuzzer/fuzzer.vcxproj deleted file mode 100644 index 21cbf56..0000000 --- a/librocksdb-sys/lz4/build/VS2010/fuzzer/fuzzer.vcxproj +++ /dev/null @@ -1,173 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - 
{18B9F1A7-9C66-4352-898B-30804DADE0FD} - Win32Proj - fuzzer - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - Unicode - - - Application - true - Unicode - - - Application - false - Unicode - true - - - Application - false - Unicode - true - - - - - - - - - - - - - - - - - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - Console - true - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - Console - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - Console - true - true - true - - - - - - - - - - - - - - - - - diff --git a/librocksdb-sys/lz4/build/VS2010/liblz4-dll/liblz4-dll.rc b/librocksdb-sys/lz4/build/VS2010/liblz4-dll/liblz4-dll.rc deleted file mode 100644 index e089c24..0000000 --- a/librocksdb-sys/lz4/build/VS2010/liblz4-dll/liblz4-dll.rc +++ /dev/null @@ -1,51 +0,0 @@ -// Microsoft Visual C++ generated resource script. 
-// - -#include "lz4.h" /* LZ4_VERSION_STRING */ -#define APSTUDIO_READONLY_SYMBOLS -#include "verrsrc.h" -#undef APSTUDIO_READONLY_SYMBOLS - - -#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU) -LANGUAGE 9, 1 - -///////////////////////////////////////////////////////////////////////////// -// -// Version -// - -VS_VERSION_INFO VERSIONINFO - FILEVERSION LZ4_VERSION_MAJOR,LZ4_VERSION_MINOR,LZ4_VERSION_RELEASE,0 - PRODUCTVERSION LZ4_VERSION_MAJOR,LZ4_VERSION_MINOR,LZ4_VERSION_RELEASE,0 - FILEFLAGSMASK VS_FFI_FILEFLAGSMASK -#ifdef _DEBUG - FILEFLAGS VS_FF_DEBUG -#else - FILEFLAGS 0x0L -#endif - FILEOS VOS_NT_WINDOWS32 - FILETYPE VFT_DLL - FILESUBTYPE VFT2_UNKNOWN -BEGIN - BLOCK "StringFileInfo" - BEGIN - BLOCK "040904B0" - BEGIN - VALUE "CompanyName", "Yann Collet" - VALUE "FileDescription", "Extremely fast compression" - VALUE "FileVersion", LZ4_VERSION_STRING - VALUE "InternalName", "lz4.dll" - VALUE "LegalCopyright", "Copyright (C) 2013-2020, Yann Collet" - VALUE "OriginalFilename", "lz4.dll" - VALUE "ProductName", "LZ4" - VALUE "ProductVersion", LZ4_VERSION_STRING - END - END - BLOCK "VarFileInfo" - BEGIN - VALUE "Translation", 0x0409, 1200 - END -END - -#endif diff --git a/librocksdb-sys/lz4/build/VS2010/liblz4-dll/liblz4-dll.vcxproj b/librocksdb-sys/lz4/build/VS2010/liblz4-dll/liblz4-dll.vcxproj deleted file mode 100644 index 56ec3b9..0000000 --- a/librocksdb-sys/lz4/build/VS2010/liblz4-dll/liblz4-dll.vcxproj +++ /dev/null @@ -1,179 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {9800039D-4AAA-43A4-BB78-FEF6F4836927} - Win32Proj - liblz4-dll - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - liblz4-dll - - - - DynamicLibrary - true - Unicode - - - DynamicLibrary - true - Unicode - - - DynamicLibrary - false - Unicode - true - - - DynamicLibrary - false - Unicode - true - - - - - - - - - - - - - - - - - - - true - liblz4 - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - liblz4 - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - liblz4 - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - liblz4 - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - true - - - - - - - Level4 - Disabled - WIN32;_DEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - true - true - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - true - true - true - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/librocksdb-sys/lz4/build/VS2010/liblz4/liblz4.vcxproj b/librocksdb-sys/lz4/build/VS2010/liblz4/liblz4.vcxproj deleted 
file mode 100644 index 61ea159..0000000 --- a/librocksdb-sys/lz4/build/VS2010/liblz4/liblz4.vcxproj +++ /dev/null @@ -1,175 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476} - Win32Proj - liblz4 - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - StaticLibrary - true - Unicode - - - StaticLibrary - true - Unicode - - - StaticLibrary - false - Unicode - true - - - StaticLibrary - false - Unicode - true - - - - - - - - - - - - - - - - - - - true - liblz4_static - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - liblz4_static - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - liblz4_static - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - liblz4_static - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - true - - - - - - - Level4 - Disabled - WIN32;_DEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - true - true - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - true - true - true - - - - - - - - - - - - - - - - - - - diff --git a/librocksdb-sys/lz4/build/VS2010/lz4.sln b/librocksdb-sys/lz4/build/VS2010/lz4.sln deleted file mode 100644 index 78f223b..0000000 --- a/librocksdb-sys/lz4/build/VS2010/lz4.sln +++ /dev/null @@ -1,98 +0,0 @@ -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio Express 2012 for Windows Desktop -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "lz4", "lz4\lz4.vcxproj", "{E30329AC-0057-4FE0-8FDA-7F650D398C4C}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblz4-dll", "liblz4-dll\liblz4-dll.vcxproj", "{9800039D-4AAA-43A4-BB78-FEF6F4836927}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblz4", "liblz4\liblz4.vcxproj", "{9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fuzzer", "fuzzer\fuzzer.vcxproj", "{18B9F1A7-9C66-4352-898B-30804DADE0FD}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fullbench", "fullbench\fullbench.vcxproj", "{6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "frametest", "frametest\frametest.vcxproj", "{39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "datagen", "datagen\datagen.vcxproj", "{D745AE2F-596A-403A-9B91-81A8C6779243}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fullbench-dll", "fullbench-dll\fullbench-dll.vcxproj", "{13992FD2-077E-4954-B065-A428198201A9}" - 
ProjectSection(ProjectDependencies) = postProject - {9800039D-4AAA-43A4-BB78-FEF6F4836927} = {9800039D-4AAA-43A4-BB78-FEF6F4836927} - EndProjectSection -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Win32 = Debug|Win32 - Debug|x64 = Debug|x64 - Release|Win32 = Release|Win32 - Release|x64 = Release|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {E30329AC-0057-4FE0-8FDA-7F650D398C4C}.Debug|Win32.ActiveCfg = Debug|Win32 - {E30329AC-0057-4FE0-8FDA-7F650D398C4C}.Debug|Win32.Build.0 = Debug|Win32 - {E30329AC-0057-4FE0-8FDA-7F650D398C4C}.Debug|x64.ActiveCfg = Debug|x64 - {E30329AC-0057-4FE0-8FDA-7F650D398C4C}.Debug|x64.Build.0 = Debug|x64 - {E30329AC-0057-4FE0-8FDA-7F650D398C4C}.Release|Win32.ActiveCfg = Release|Win32 - {E30329AC-0057-4FE0-8FDA-7F650D398C4C}.Release|Win32.Build.0 = Release|Win32 - {E30329AC-0057-4FE0-8FDA-7F650D398C4C}.Release|x64.ActiveCfg = Release|x64 - {E30329AC-0057-4FE0-8FDA-7F650D398C4C}.Release|x64.Build.0 = Release|x64 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Debug|Win32.ActiveCfg = Debug|Win32 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Debug|Win32.Build.0 = Debug|Win32 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Debug|x64.ActiveCfg = Debug|x64 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Debug|x64.Build.0 = Debug|x64 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Release|Win32.ActiveCfg = Release|Win32 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Release|Win32.Build.0 = Release|Win32 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Release|x64.ActiveCfg = Release|x64 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Release|x64.Build.0 = Release|x64 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Debug|Win32.ActiveCfg = Debug|Win32 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Debug|Win32.Build.0 = Debug|Win32 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Debug|x64.ActiveCfg = Debug|x64 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Debug|x64.Build.0 = Debug|x64 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Release|Win32.ActiveCfg = Release|Win32 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Release|Win32.Build.0 = Release|Win32 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Release|x64.ActiveCfg = Release|x64 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Release|x64.Build.0 = Release|x64 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Debug|Win32.ActiveCfg = Debug|Win32 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Debug|Win32.Build.0 = Debug|Win32 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Debug|x64.ActiveCfg = Debug|x64 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Debug|x64.Build.0 = Debug|x64 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Release|Win32.ActiveCfg = Release|Win32 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Release|Win32.Build.0 = Release|Win32 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Release|x64.ActiveCfg = Release|x64 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Release|x64.Build.0 = Release|x64 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Debug|Win32.ActiveCfg = Debug|Win32 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Debug|Win32.Build.0 = Debug|Win32 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Debug|x64.ActiveCfg = Debug|x64 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Debug|x64.Build.0 = Debug|x64 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Release|Win32.ActiveCfg = Release|Win32 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Release|Win32.Build.0 = Release|Win32 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Release|x64.ActiveCfg = Release|x64 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Release|x64.Build.0 = Release|x64 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Debug|Win32.ActiveCfg = Debug|Win32 - 
{39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Debug|Win32.Build.0 = Debug|Win32 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Debug|x64.ActiveCfg = Debug|x64 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Debug|x64.Build.0 = Debug|x64 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Release|Win32.ActiveCfg = Release|Win32 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Release|Win32.Build.0 = Release|Win32 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Release|x64.ActiveCfg = Release|x64 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Release|x64.Build.0 = Release|x64 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Debug|Win32.ActiveCfg = Debug|Win32 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Debug|Win32.Build.0 = Debug|Win32 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Debug|x64.ActiveCfg = Debug|x64 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Debug|x64.Build.0 = Debug|x64 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Release|Win32.ActiveCfg = Release|Win32 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Release|Win32.Build.0 = Release|Win32 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Release|x64.ActiveCfg = Release|x64 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Release|x64.Build.0 = Release|x64 - {13992FD2-077E-4954-B065-A428198201A9}.Debug|Win32.ActiveCfg = Debug|Win32 - {13992FD2-077E-4954-B065-A428198201A9}.Debug|Win32.Build.0 = Debug|Win32 - {13992FD2-077E-4954-B065-A428198201A9}.Debug|x64.ActiveCfg = Debug|x64 - {13992FD2-077E-4954-B065-A428198201A9}.Debug|x64.Build.0 = Debug|x64 - {13992FD2-077E-4954-B065-A428198201A9}.Release|Win32.ActiveCfg = Release|Win32 - {13992FD2-077E-4954-B065-A428198201A9}.Release|Win32.Build.0 = Release|Win32 - {13992FD2-077E-4954-B065-A428198201A9}.Release|x64.ActiveCfg = Release|x64 - {13992FD2-077E-4954-B065-A428198201A9}.Release|x64.Build.0 = Release|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal diff --git a/librocksdb-sys/lz4/build/VS2010/lz4/lz4.rc b/librocksdb-sys/lz4/build/VS2010/lz4/lz4.rc deleted file mode 100644 index 5eec36b..0000000 --- a/librocksdb-sys/lz4/build/VS2010/lz4/lz4.rc +++ /dev/null @@ -1,51 +0,0 @@ -// Microsoft Visual C++ generated resource script. 
-// - -#include "lz4.h" /* LZ4_VERSION_STRING */ -#define APSTUDIO_READONLY_SYMBOLS -#include "verrsrc.h" -#undef APSTUDIO_READONLY_SYMBOLS - - -#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU) -LANGUAGE 9, 1 - -///////////////////////////////////////////////////////////////////////////// -// -// Version -// - -VS_VERSION_INFO VERSIONINFO - FILEVERSION LZ4_VERSION_MAJOR,LZ4_VERSION_MINOR,LZ4_VERSION_RELEASE,0 - PRODUCTVERSION LZ4_VERSION_MAJOR,LZ4_VERSION_MINOR,LZ4_VERSION_RELEASE,0 - FILEFLAGSMASK VS_FFI_FILEFLAGSMASK -#ifdef _DEBUG - FILEFLAGS VS_FF_DEBUG -#else - FILEFLAGS 0x0L -#endif - FILEOS VOS_NT_WINDOWS32 - FILETYPE VFT_DLL - FILESUBTYPE VFT2_UNKNOWN -BEGIN - BLOCK "StringFileInfo" - BEGIN - BLOCK "040904B0" - BEGIN - VALUE "CompanyName", "Yann Collet" - VALUE "FileDescription", "Extremely fast compression" - VALUE "FileVersion", LZ4_VERSION_STRING - VALUE "InternalName", "lz4.exe" - VALUE "LegalCopyright", "Copyright (C) 2013-2020, Yann Collet" - VALUE "OriginalFilename", "lz4.exe" - VALUE "ProductName", "LZ4" - VALUE "ProductVersion", LZ4_VERSION_STRING - END - END - BLOCK "VarFileInfo" - BEGIN - VALUE "Translation", 0x0409, 1200 - END -END - -#endif diff --git a/librocksdb-sys/lz4/build/VS2010/lz4/lz4.vcxproj b/librocksdb-sys/lz4/build/VS2010/lz4/lz4.vcxproj deleted file mode 100644 index de7a714..0000000 --- a/librocksdb-sys/lz4/build/VS2010/lz4/lz4.vcxproj +++ /dev/null @@ -1,189 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {E30329AC-0057-4FE0-8FDA-7F650D398C4C} - Win32Proj - lz4 - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - Unicode - - - Application - true - Unicode - - - Application - false - Unicode - true - - - Application - false - Unicode - true - - - - - - - - - - - - - - - - - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - Console - true - setargv.obj;%(AdditionalDependencies) - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - Console - true - setargv.obj;%(AdditionalDependencies) - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - setargv.obj;%(AdditionalDependencies) - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - Console - true - true - true - setargv.obj;%(AdditionalDependencies) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git 
a/librocksdb-sys/lz4/build/VS2017/datagen/datagen.vcxproj b/librocksdb-sys/lz4/build/VS2017/datagen/datagen.vcxproj deleted file mode 100644 index 30e159e..0000000 --- a/librocksdb-sys/lz4/build/VS2017/datagen/datagen.vcxproj +++ /dev/null @@ -1,173 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {D745AE2F-596A-403A-9B91-81A8C6779243} - Win32Proj - datagen - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - Unicode - v141 - - - Application - true - Unicode - v141 - - - Application - false - Unicode - true - v141 - - - Application - false - Unicode - true - v141 - - - - - - - - - - - - - - - - - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - Console - true - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - Console - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - Console - true - true - true - - - - - - - - - - - - - \ No newline at end of file diff --git a/librocksdb-sys/lz4/build/VS2017/frametest/frametest.vcxproj b/librocksdb-sys/lz4/build/VS2017/frametest/frametest.vcxproj deleted file mode 100644 index a3a403d..0000000 --- a/librocksdb-sys/lz4/build/VS2017/frametest/frametest.vcxproj +++ /dev/null @@ -1,180 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7} - Win32Proj - frametest - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - Unicode - v141 - - - Application - true - Unicode - v141 - - - Application - false - Unicode - true - v141 - - - Application - false - Unicode - true - v141 - - - - - - - - - - - - - - - - - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - 
$(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - Console - true - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - Console - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - Console - true - true - true - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/librocksdb-sys/lz4/build/VS2017/fullbench-dll/fullbench-dll.vcxproj b/librocksdb-sys/lz4/build/VS2017/fullbench-dll/fullbench-dll.vcxproj deleted file mode 100644 index d54a8d7..0000000 --- a/librocksdb-sys/lz4/build/VS2017/fullbench-dll/fullbench-dll.vcxproj +++ /dev/null @@ -1,184 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {13992FD2-077E-4954-B065-A428198201A9} - Win32Proj - fullbench-dll - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - Unicode - v141 - - - Application - true - Unicode - v141 - - - Application - false - Unicode - true - v141 - - - Application - false - Unicode - true - v141 - - - - - - - - - - - - - - - - - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - Console - true - $(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - liblz4.lib;%(AdditionalDependencies) - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - Console - true - $(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - liblz4.lib;%(AdditionalDependencies) - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - 
$(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - liblz4.lib;%(AdditionalDependencies) - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - Console - true - true - true - $(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - liblz4.lib;%(AdditionalDependencies) - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/librocksdb-sys/lz4/build/VS2017/fullbench/fullbench.vcxproj b/librocksdb-sys/lz4/build/VS2017/fullbench/fullbench.vcxproj deleted file mode 100644 index 54c9743..0000000 --- a/librocksdb-sys/lz4/build/VS2017/fullbench/fullbench.vcxproj +++ /dev/null @@ -1,180 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E} - Win32Proj - fullbench - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - Unicode - v141 - - - Application - true - Unicode - v141 - - - Application - false - Unicode - true - v141 - - - Application - false - Unicode - true - v141 - - - - - - - - - - - - - - - - - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - Console - true - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - Console - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - Console - true - true - true - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/librocksdb-sys/lz4/build/VS2017/fuzzer/fuzzer.vcxproj b/librocksdb-sys/lz4/build/VS2017/fuzzer/fuzzer.vcxproj deleted file mode 100644 index aa6fe42..0000000 --- a/librocksdb-sys/lz4/build/VS2017/fuzzer/fuzzer.vcxproj +++ /dev/null @@ -1,177 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {18B9F1A7-9C66-4352-898B-30804DADE0FD} - Win32Proj - fuzzer - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - Unicode - v141 - - - Application - true - Unicode - v141 - - - Application - false - Unicode - true - v141 - - - Application - false - Unicode - 
true - v141 - - - - - - - - - - - - - - - - - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - Console - true - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - Console - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - Console - true - true - true - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/librocksdb-sys/lz4/build/VS2017/liblz4-dll/liblz4-dll.rc b/librocksdb-sys/lz4/build/VS2017/liblz4-dll/liblz4-dll.rc deleted file mode 100644 index e089c24..0000000 --- a/librocksdb-sys/lz4/build/VS2017/liblz4-dll/liblz4-dll.rc +++ /dev/null @@ -1,51 +0,0 @@ -// Microsoft Visual C++ generated resource script. 
-// - -#include "lz4.h" /* LZ4_VERSION_STRING */ -#define APSTUDIO_READONLY_SYMBOLS -#include "verrsrc.h" -#undef APSTUDIO_READONLY_SYMBOLS - - -#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU) -LANGUAGE 9, 1 - -///////////////////////////////////////////////////////////////////////////// -// -// Version -// - -VS_VERSION_INFO VERSIONINFO - FILEVERSION LZ4_VERSION_MAJOR,LZ4_VERSION_MINOR,LZ4_VERSION_RELEASE,0 - PRODUCTVERSION LZ4_VERSION_MAJOR,LZ4_VERSION_MINOR,LZ4_VERSION_RELEASE,0 - FILEFLAGSMASK VS_FFI_FILEFLAGSMASK -#ifdef _DEBUG - FILEFLAGS VS_FF_DEBUG -#else - FILEFLAGS 0x0L -#endif - FILEOS VOS_NT_WINDOWS32 - FILETYPE VFT_DLL - FILESUBTYPE VFT2_UNKNOWN -BEGIN - BLOCK "StringFileInfo" - BEGIN - BLOCK "040904B0" - BEGIN - VALUE "CompanyName", "Yann Collet" - VALUE "FileDescription", "Extremely fast compression" - VALUE "FileVersion", LZ4_VERSION_STRING - VALUE "InternalName", "lz4.dll" - VALUE "LegalCopyright", "Copyright (C) 2013-2020, Yann Collet" - VALUE "OriginalFilename", "lz4.dll" - VALUE "ProductName", "LZ4" - VALUE "ProductVersion", LZ4_VERSION_STRING - END - END - BLOCK "VarFileInfo" - BEGIN - VALUE "Translation", 0x0409, 1200 - END -END - -#endif diff --git a/librocksdb-sys/lz4/build/VS2017/liblz4-dll/liblz4-dll.vcxproj b/librocksdb-sys/lz4/build/VS2017/liblz4-dll/liblz4-dll.vcxproj deleted file mode 100644 index 8e7ee3b..0000000 --- a/librocksdb-sys/lz4/build/VS2017/liblz4-dll/liblz4-dll.vcxproj +++ /dev/null @@ -1,183 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {9800039D-4AAA-43A4-BB78-FEF6F4836927} - Win32Proj - liblz4-dll - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - liblz4-dll - - - - DynamicLibrary - true - Unicode - v141 - - - DynamicLibrary - true - Unicode - v141 - - - DynamicLibrary - false - Unicode - true - v141 - - - DynamicLibrary - false - Unicode - true - v141 - - - - - - - - - - - - - - - - - - - true - liblz4 - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - liblz4 - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - liblz4 - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - liblz4 - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - true - - - - - - - Level4 - Disabled - WIN32;_DEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - true - true - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - true - true - true - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/librocksdb-sys/lz4/build/VS2017/liblz4/liblz4.vcxproj 
b/librocksdb-sys/lz4/build/VS2017/liblz4/liblz4.vcxproj deleted file mode 100644 index 948f7db..0000000 --- a/librocksdb-sys/lz4/build/VS2017/liblz4/liblz4.vcxproj +++ /dev/null @@ -1,179 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476} - Win32Proj - liblz4 - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - StaticLibrary - true - Unicode - v141 - - - StaticLibrary - true - Unicode - v141 - - - StaticLibrary - false - Unicode - true - v141 - - - StaticLibrary - false - Unicode - true - v141 - - - - - - - - - - - - - - - - - - - true - liblz4_static - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - liblz4_static - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - liblz4_static - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - liblz4_static - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - true - - - - - - - Level4 - Disabled - WIN32;_DEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - true - true - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - true - true - true - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/librocksdb-sys/lz4/build/VS2017/lz4.sln b/librocksdb-sys/lz4/build/VS2017/lz4.sln deleted file mode 100644 index 6a2779f..0000000 --- a/librocksdb-sys/lz4/build/VS2017/lz4.sln +++ /dev/null @@ -1,103 +0,0 @@ -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 15 -VisualStudioVersion = 15.0.28307.271 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblz4-dll", "liblz4-dll\liblz4-dll.vcxproj", "{9800039D-4AAA-43A4-BB78-FEF6F4836927}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblz4", "liblz4\liblz4.vcxproj", "{9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fuzzer", "fuzzer\fuzzer.vcxproj", "{18B9F1A7-9C66-4352-898B-30804DADE0FD}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fullbench", "fullbench\fullbench.vcxproj", "{6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "frametest", "frametest\frametest.vcxproj", "{39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "datagen", "datagen\datagen.vcxproj", "{D745AE2F-596A-403A-9B91-81A8C6779243}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fullbench-dll", "fullbench-dll\fullbench-dll.vcxproj", 
"{13992FD2-077E-4954-B065-A428198201A9}" - ProjectSection(ProjectDependencies) = postProject - {9800039D-4AAA-43A4-BB78-FEF6F4836927} = {9800039D-4AAA-43A4-BB78-FEF6F4836927} - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "lz4", "lz4\lz4.vcxproj", "{60A3115E-B988-41EE-8815-F4D4F253D866}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Win32 = Debug|Win32 - Debug|x64 = Debug|x64 - Release|Win32 = Release|Win32 - Release|x64 = Release|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Debug|Win32.ActiveCfg = Debug|Win32 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Debug|Win32.Build.0 = Debug|Win32 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Debug|x64.ActiveCfg = Debug|x64 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Debug|x64.Build.0 = Debug|x64 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Release|Win32.ActiveCfg = Release|Win32 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Release|Win32.Build.0 = Release|Win32 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Release|x64.ActiveCfg = Release|x64 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Release|x64.Build.0 = Release|x64 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Debug|Win32.ActiveCfg = Debug|Win32 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Debug|Win32.Build.0 = Debug|Win32 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Debug|x64.ActiveCfg = Debug|x64 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Debug|x64.Build.0 = Debug|x64 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Release|Win32.ActiveCfg = Release|Win32 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Release|Win32.Build.0 = Release|Win32 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Release|x64.ActiveCfg = Release|x64 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Release|x64.Build.0 = Release|x64 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Debug|Win32.ActiveCfg = Debug|Win32 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Debug|Win32.Build.0 = Debug|Win32 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Debug|x64.ActiveCfg = Debug|x64 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Debug|x64.Build.0 = Debug|x64 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Release|Win32.ActiveCfg = Release|Win32 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Release|Win32.Build.0 = Release|Win32 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Release|x64.ActiveCfg = Release|x64 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Release|x64.Build.0 = Release|x64 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Debug|Win32.ActiveCfg = Debug|Win32 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Debug|Win32.Build.0 = Debug|Win32 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Debug|x64.ActiveCfg = Debug|x64 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Debug|x64.Build.0 = Debug|x64 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Release|Win32.ActiveCfg = Release|Win32 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Release|Win32.Build.0 = Release|Win32 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Release|x64.ActiveCfg = Release|x64 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Release|x64.Build.0 = Release|x64 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Debug|Win32.ActiveCfg = Debug|Win32 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Debug|Win32.Build.0 = Debug|Win32 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Debug|x64.ActiveCfg = Debug|x64 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Debug|x64.Build.0 = Debug|x64 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Release|Win32.ActiveCfg = Release|Win32 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Release|Win32.Build.0 = Release|Win32 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Release|x64.ActiveCfg = 
Release|x64 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Release|x64.Build.0 = Release|x64 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Debug|Win32.ActiveCfg = Debug|Win32 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Debug|Win32.Build.0 = Debug|Win32 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Debug|x64.ActiveCfg = Debug|x64 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Debug|x64.Build.0 = Debug|x64 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Release|Win32.ActiveCfg = Release|Win32 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Release|Win32.Build.0 = Release|Win32 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Release|x64.ActiveCfg = Release|x64 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Release|x64.Build.0 = Release|x64 - {13992FD2-077E-4954-B065-A428198201A9}.Debug|Win32.ActiveCfg = Debug|Win32 - {13992FD2-077E-4954-B065-A428198201A9}.Debug|Win32.Build.0 = Debug|Win32 - {13992FD2-077E-4954-B065-A428198201A9}.Debug|x64.ActiveCfg = Debug|x64 - {13992FD2-077E-4954-B065-A428198201A9}.Debug|x64.Build.0 = Debug|x64 - {13992FD2-077E-4954-B065-A428198201A9}.Release|Win32.ActiveCfg = Release|Win32 - {13992FD2-077E-4954-B065-A428198201A9}.Release|Win32.Build.0 = Release|Win32 - {13992FD2-077E-4954-B065-A428198201A9}.Release|x64.ActiveCfg = Release|x64 - {13992FD2-077E-4954-B065-A428198201A9}.Release|x64.Build.0 = Release|x64 - {60A3115E-B988-41EE-8815-F4D4F253D866}.Debug|Win32.ActiveCfg = Debug|Win32 - {60A3115E-B988-41EE-8815-F4D4F253D866}.Debug|Win32.Build.0 = Debug|Win32 - {60A3115E-B988-41EE-8815-F4D4F253D866}.Debug|x64.ActiveCfg = Debug|x64 - {60A3115E-B988-41EE-8815-F4D4F253D866}.Debug|x64.Build.0 = Debug|x64 - {60A3115E-B988-41EE-8815-F4D4F253D866}.Release|Win32.ActiveCfg = Release|Win32 - {60A3115E-B988-41EE-8815-F4D4F253D866}.Release|Win32.Build.0 = Release|Win32 - {60A3115E-B988-41EE-8815-F4D4F253D866}.Release|x64.ActiveCfg = Release|x64 - {60A3115E-B988-41EE-8815-F4D4F253D866}.Release|x64.Build.0 = Release|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection - GlobalSection(ExtensibilityGlobals) = postSolution - SolutionGuid = {BBC259B2-BABF-47CD-8A6A-7B8318A803AC} - EndGlobalSection -EndGlobal diff --git a/librocksdb-sys/lz4/build/VS2017/lz4/lz4.rc b/librocksdb-sys/lz4/build/VS2017/lz4/lz4.rc deleted file mode 100644 index 5eec36b..0000000 --- a/librocksdb-sys/lz4/build/VS2017/lz4/lz4.rc +++ /dev/null @@ -1,51 +0,0 @@ -// Microsoft Visual C++ generated resource script. 
-// - -#include "lz4.h" /* LZ4_VERSION_STRING */ -#define APSTUDIO_READONLY_SYMBOLS -#include "verrsrc.h" -#undef APSTUDIO_READONLY_SYMBOLS - - -#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU) -LANGUAGE 9, 1 - -///////////////////////////////////////////////////////////////////////////// -// -// Version -// - -VS_VERSION_INFO VERSIONINFO - FILEVERSION LZ4_VERSION_MAJOR,LZ4_VERSION_MINOR,LZ4_VERSION_RELEASE,0 - PRODUCTVERSION LZ4_VERSION_MAJOR,LZ4_VERSION_MINOR,LZ4_VERSION_RELEASE,0 - FILEFLAGSMASK VS_FFI_FILEFLAGSMASK -#ifdef _DEBUG - FILEFLAGS VS_FF_DEBUG -#else - FILEFLAGS 0x0L -#endif - FILEOS VOS_NT_WINDOWS32 - FILETYPE VFT_DLL - FILESUBTYPE VFT2_UNKNOWN -BEGIN - BLOCK "StringFileInfo" - BEGIN - BLOCK "040904B0" - BEGIN - VALUE "CompanyName", "Yann Collet" - VALUE "FileDescription", "Extremely fast compression" - VALUE "FileVersion", LZ4_VERSION_STRING - VALUE "InternalName", "lz4.exe" - VALUE "LegalCopyright", "Copyright (C) 2013-2020, Yann Collet" - VALUE "OriginalFilename", "lz4.exe" - VALUE "ProductName", "LZ4" - VALUE "ProductVersion", LZ4_VERSION_STRING - END - END - BLOCK "VarFileInfo" - BEGIN - VALUE "Translation", 0x0409, 1200 - END -END - -#endif diff --git a/librocksdb-sys/lz4/build/VS2017/lz4/lz4.vcxproj b/librocksdb-sys/lz4/build/VS2017/lz4/lz4.vcxproj deleted file mode 100644 index f16c1ec..0000000 --- a/librocksdb-sys/lz4/build/VS2017/lz4/lz4.vcxproj +++ /dev/null @@ -1,175 +0,0 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - - 15.0 - {60A3115E-B988-41EE-8815-F4D4F253D866} - lz4 - 8.1 - - - - Application - true - v141 - Unicode - - - Application - false - v141 - false - Unicode - - - Application - true - v141 - MultiByte - - - Application - false - v141 - true - MultiByte - - - - - - - - - - - - - - - - - - - - - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - false - - - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - false - - - - Level4 - Disabled - true - true - true - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - - - true - Console - false - false - - - - - Level3 - Disabled - true - true - - - - - Level3 - MaxSpeed - true - true - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - - - true - true - true - Console - - - - - Level3 - MaxSpeed - true - true - true - true - - - true - true - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git 
a/librocksdb-sys/lz4/build/VS2022/datagen/datagen.vcxproj b/librocksdb-sys/lz4/build/VS2022/datagen/datagen.vcxproj deleted file mode 100644 index 69034d4..0000000 --- a/librocksdb-sys/lz4/build/VS2022/datagen/datagen.vcxproj +++ /dev/null @@ -1,173 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {D745AE2F-596A-403A-9B91-81A8C6779243} - Win32Proj - datagen - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - Unicode - v143 - - - Application - true - Unicode - v143 - - - Application - false - Unicode - true - v143 - - - Application - false - Unicode - true - v143 - - - - - - - - - - - - - - - - - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - Console - true - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - Console - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - Console - true - true - true - - - - - - - - - - - - - \ No newline at end of file diff --git a/librocksdb-sys/lz4/build/VS2022/frametest/frametest.vcxproj b/librocksdb-sys/lz4/build/VS2022/frametest/frametest.vcxproj deleted file mode 100644 index 6b7ff75..0000000 --- a/librocksdb-sys/lz4/build/VS2022/frametest/frametest.vcxproj +++ /dev/null @@ -1,180 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7} - Win32Proj - frametest - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - Unicode - v143 - - - Application - true - Unicode - v143 - - - Application - false - Unicode - true - v143 - - - Application - false - Unicode - true - v143 - - - - - - - - - - - - - - - - - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - 
$(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - Console - true - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - Console - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - Console - true - true - true - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/librocksdb-sys/lz4/build/VS2022/fullbench-dll/fullbench-dll.vcxproj b/librocksdb-sys/lz4/build/VS2022/fullbench-dll/fullbench-dll.vcxproj deleted file mode 100644 index 143dc06..0000000 --- a/librocksdb-sys/lz4/build/VS2022/fullbench-dll/fullbench-dll.vcxproj +++ /dev/null @@ -1,184 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {13992FD2-077E-4954-B065-A428198201A9} - Win32Proj - fullbench-dll - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - Unicode - v143 - - - Application - true - Unicode - v143 - - - Application - false - Unicode - true - v143 - - - Application - false - Unicode - true - v143 - - - - - - - - - - - - - - - - - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - Console - true - $(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - liblz4.lib;%(AdditionalDependencies) - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - Console - true - $(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - liblz4.lib;%(AdditionalDependencies) - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - 
$(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - liblz4.lib;%(AdditionalDependencies) - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - Console - true - true - true - $(SolutionDir)bin\$(Platform)_$(Configuration);%(AdditionalLibraryDirectories) - liblz4.lib;%(AdditionalDependencies) - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/librocksdb-sys/lz4/build/VS2022/fullbench/fullbench.vcxproj b/librocksdb-sys/lz4/build/VS2022/fullbench/fullbench.vcxproj deleted file mode 100644 index 57f4b5a..0000000 --- a/librocksdb-sys/lz4/build/VS2022/fullbench/fullbench.vcxproj +++ /dev/null @@ -1,180 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E} - Win32Proj - fullbench - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - Unicode - v143 - - - Application - true - Unicode - v143 - - - Application - false - Unicode - true - v143 - - - Application - false - Unicode - true - v143 - - - - - - - - - - - - - - - - - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - Console - true - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - Console - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - Console - true - true - true - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/librocksdb-sys/lz4/build/VS2022/fuzzer/fuzzer.vcxproj b/librocksdb-sys/lz4/build/VS2022/fuzzer/fuzzer.vcxproj deleted file mode 100644 index 83482c2..0000000 --- a/librocksdb-sys/lz4/build/VS2022/fuzzer/fuzzer.vcxproj +++ /dev/null @@ -1,177 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {18B9F1A7-9C66-4352-898B-30804DADE0FD} - Win32Proj - fuzzer - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - Unicode - v143 - - - Application - true - Unicode - v143 - - - Application - false - Unicode - true - v143 - - - Application - false - Unicode - 
true - v143 - - - - - - - - - - - - - - - - - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - Console - true - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - Console - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - Console - true - true - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - Console - true - true - true - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/librocksdb-sys/lz4/build/VS2022/liblz4-dll/liblz4-dll.rc b/librocksdb-sys/lz4/build/VS2022/liblz4-dll/liblz4-dll.rc deleted file mode 100644 index e089c24..0000000 --- a/librocksdb-sys/lz4/build/VS2022/liblz4-dll/liblz4-dll.rc +++ /dev/null @@ -1,51 +0,0 @@ -// Microsoft Visual C++ generated resource script. 
-// - -#include "lz4.h" /* LZ4_VERSION_STRING */ -#define APSTUDIO_READONLY_SYMBOLS -#include "verrsrc.h" -#undef APSTUDIO_READONLY_SYMBOLS - - -#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU) -LANGUAGE 9, 1 - -///////////////////////////////////////////////////////////////////////////// -// -// Version -// - -VS_VERSION_INFO VERSIONINFO - FILEVERSION LZ4_VERSION_MAJOR,LZ4_VERSION_MINOR,LZ4_VERSION_RELEASE,0 - PRODUCTVERSION LZ4_VERSION_MAJOR,LZ4_VERSION_MINOR,LZ4_VERSION_RELEASE,0 - FILEFLAGSMASK VS_FFI_FILEFLAGSMASK -#ifdef _DEBUG - FILEFLAGS VS_FF_DEBUG -#else - FILEFLAGS 0x0L -#endif - FILEOS VOS_NT_WINDOWS32 - FILETYPE VFT_DLL - FILESUBTYPE VFT2_UNKNOWN -BEGIN - BLOCK "StringFileInfo" - BEGIN - BLOCK "040904B0" - BEGIN - VALUE "CompanyName", "Yann Collet" - VALUE "FileDescription", "Extremely fast compression" - VALUE "FileVersion", LZ4_VERSION_STRING - VALUE "InternalName", "lz4.dll" - VALUE "LegalCopyright", "Copyright (C) 2013-2020, Yann Collet" - VALUE "OriginalFilename", "lz4.dll" - VALUE "ProductName", "LZ4" - VALUE "ProductVersion", LZ4_VERSION_STRING - END - END - BLOCK "VarFileInfo" - BEGIN - VALUE "Translation", 0x0409, 1200 - END -END - -#endif diff --git a/librocksdb-sys/lz4/build/VS2022/liblz4-dll/liblz4-dll.vcxproj b/librocksdb-sys/lz4/build/VS2022/liblz4-dll/liblz4-dll.vcxproj deleted file mode 100644 index 532ac75..0000000 --- a/librocksdb-sys/lz4/build/VS2022/liblz4-dll/liblz4-dll.vcxproj +++ /dev/null @@ -1,183 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {9800039D-4AAA-43A4-BB78-FEF6F4836927} - Win32Proj - liblz4-dll - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - liblz4-dll - - - - DynamicLibrary - true - Unicode - v143 - - - DynamicLibrary - true - Unicode - v143 - - - DynamicLibrary - false - Unicode - true - v143 - - - DynamicLibrary - false - Unicode - true - v143 - - - - - - - - - - - - - - - - - - - true - liblz4 - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - liblz4 - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - liblz4 - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - liblz4 - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - true - - - - - - - Level4 - Disabled - WIN32;_DEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - true - true - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - true - true - true - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/librocksdb-sys/lz4/build/VS2022/liblz4/liblz4.vcxproj 
b/librocksdb-sys/lz4/build/VS2022/liblz4/liblz4.vcxproj deleted file mode 100644 index fdddaaa..0000000 --- a/librocksdb-sys/lz4/build/VS2022/liblz4/liblz4.vcxproj +++ /dev/null @@ -1,179 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476} - Win32Proj - liblz4 - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - StaticLibrary - true - Unicode - v143 - - - StaticLibrary - true - Unicode - v143 - - - StaticLibrary - false - Unicode - true - v143 - - - StaticLibrary - false - Unicode - true - v143 - - - - - - - - - - - - - - - - - - - true - liblz4_static - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - liblz4_static - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - liblz4_static - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - liblz4_static - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - true - false - MultiThreadedDebug - - - true - - - - - - - Level4 - Disabled - WIN32;_DEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreadedDebug - - - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - false - false - MultiThreaded - - - true - true - true - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;LZ4_DLL_EXPORT=1;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - MultiThreaded - - - true - true - true - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/librocksdb-sys/lz4/build/VS2022/lz4.sln b/librocksdb-sys/lz4/build/VS2022/lz4.sln deleted file mode 100644 index 6a2779f..0000000 --- a/librocksdb-sys/lz4/build/VS2022/lz4.sln +++ /dev/null @@ -1,103 +0,0 @@ -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 15 -VisualStudioVersion = 15.0.28307.271 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblz4-dll", "liblz4-dll\liblz4-dll.vcxproj", "{9800039D-4AAA-43A4-BB78-FEF6F4836927}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblz4", "liblz4\liblz4.vcxproj", "{9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fuzzer", "fuzzer\fuzzer.vcxproj", "{18B9F1A7-9C66-4352-898B-30804DADE0FD}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fullbench", "fullbench\fullbench.vcxproj", "{6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "frametest", "frametest\frametest.vcxproj", "{39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "datagen", "datagen\datagen.vcxproj", "{D745AE2F-596A-403A-9B91-81A8C6779243}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fullbench-dll", "fullbench-dll\fullbench-dll.vcxproj", 
"{13992FD2-077E-4954-B065-A428198201A9}" - ProjectSection(ProjectDependencies) = postProject - {9800039D-4AAA-43A4-BB78-FEF6F4836927} = {9800039D-4AAA-43A4-BB78-FEF6F4836927} - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "lz4", "lz4\lz4.vcxproj", "{60A3115E-B988-41EE-8815-F4D4F253D866}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Win32 = Debug|Win32 - Debug|x64 = Debug|x64 - Release|Win32 = Release|Win32 - Release|x64 = Release|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Debug|Win32.ActiveCfg = Debug|Win32 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Debug|Win32.Build.0 = Debug|Win32 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Debug|x64.ActiveCfg = Debug|x64 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Debug|x64.Build.0 = Debug|x64 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Release|Win32.ActiveCfg = Release|Win32 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Release|Win32.Build.0 = Release|Win32 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Release|x64.ActiveCfg = Release|x64 - {9800039D-4AAA-43A4-BB78-FEF6F4836927}.Release|x64.Build.0 = Release|x64 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Debug|Win32.ActiveCfg = Debug|Win32 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Debug|Win32.Build.0 = Debug|Win32 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Debug|x64.ActiveCfg = Debug|x64 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Debug|x64.Build.0 = Debug|x64 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Release|Win32.ActiveCfg = Release|Win32 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Release|Win32.Build.0 = Release|Win32 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Release|x64.ActiveCfg = Release|x64 - {9092C5CC-3E71-41B3-BF68-4A7BDD8A5476}.Release|x64.Build.0 = Release|x64 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Debug|Win32.ActiveCfg = Debug|Win32 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Debug|Win32.Build.0 = Debug|Win32 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Debug|x64.ActiveCfg = Debug|x64 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Debug|x64.Build.0 = Debug|x64 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Release|Win32.ActiveCfg = Release|Win32 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Release|Win32.Build.0 = Release|Win32 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Release|x64.ActiveCfg = Release|x64 - {18B9F1A7-9C66-4352-898B-30804DADE0FD}.Release|x64.Build.0 = Release|x64 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Debug|Win32.ActiveCfg = Debug|Win32 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Debug|Win32.Build.0 = Debug|Win32 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Debug|x64.ActiveCfg = Debug|x64 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Debug|x64.Build.0 = Debug|x64 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Release|Win32.ActiveCfg = Release|Win32 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Release|Win32.Build.0 = Release|Win32 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Release|x64.ActiveCfg = Release|x64 - {6A4DF4EF-C77F-43C6-8901-DDCD20879E4E}.Release|x64.Build.0 = Release|x64 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Debug|Win32.ActiveCfg = Debug|Win32 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Debug|Win32.Build.0 = Debug|Win32 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Debug|x64.ActiveCfg = Debug|x64 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Debug|x64.Build.0 = Debug|x64 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Release|Win32.ActiveCfg = Release|Win32 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Release|Win32.Build.0 = Release|Win32 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Release|x64.ActiveCfg = 
Release|x64 - {39AD6ECC-8BAD-4368-95E4-A1AA2F077BB7}.Release|x64.Build.0 = Release|x64 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Debug|Win32.ActiveCfg = Debug|Win32 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Debug|Win32.Build.0 = Debug|Win32 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Debug|x64.ActiveCfg = Debug|x64 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Debug|x64.Build.0 = Debug|x64 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Release|Win32.ActiveCfg = Release|Win32 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Release|Win32.Build.0 = Release|Win32 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Release|x64.ActiveCfg = Release|x64 - {D745AE2F-596A-403A-9B91-81A8C6779243}.Release|x64.Build.0 = Release|x64 - {13992FD2-077E-4954-B065-A428198201A9}.Debug|Win32.ActiveCfg = Debug|Win32 - {13992FD2-077E-4954-B065-A428198201A9}.Debug|Win32.Build.0 = Debug|Win32 - {13992FD2-077E-4954-B065-A428198201A9}.Debug|x64.ActiveCfg = Debug|x64 - {13992FD2-077E-4954-B065-A428198201A9}.Debug|x64.Build.0 = Debug|x64 - {13992FD2-077E-4954-B065-A428198201A9}.Release|Win32.ActiveCfg = Release|Win32 - {13992FD2-077E-4954-B065-A428198201A9}.Release|Win32.Build.0 = Release|Win32 - {13992FD2-077E-4954-B065-A428198201A9}.Release|x64.ActiveCfg = Release|x64 - {13992FD2-077E-4954-B065-A428198201A9}.Release|x64.Build.0 = Release|x64 - {60A3115E-B988-41EE-8815-F4D4F253D866}.Debug|Win32.ActiveCfg = Debug|Win32 - {60A3115E-B988-41EE-8815-F4D4F253D866}.Debug|Win32.Build.0 = Debug|Win32 - {60A3115E-B988-41EE-8815-F4D4F253D866}.Debug|x64.ActiveCfg = Debug|x64 - {60A3115E-B988-41EE-8815-F4D4F253D866}.Debug|x64.Build.0 = Debug|x64 - {60A3115E-B988-41EE-8815-F4D4F253D866}.Release|Win32.ActiveCfg = Release|Win32 - {60A3115E-B988-41EE-8815-F4D4F253D866}.Release|Win32.Build.0 = Release|Win32 - {60A3115E-B988-41EE-8815-F4D4F253D866}.Release|x64.ActiveCfg = Release|x64 - {60A3115E-B988-41EE-8815-F4D4F253D866}.Release|x64.Build.0 = Release|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection - GlobalSection(ExtensibilityGlobals) = postSolution - SolutionGuid = {BBC259B2-BABF-47CD-8A6A-7B8318A803AC} - EndGlobalSection -EndGlobal diff --git a/librocksdb-sys/lz4/build/cmake/.gitignore b/librocksdb-sys/lz4/build/cmake/.gitignore deleted file mode 100644 index 0ad8240..0000000 --- a/librocksdb-sys/lz4/build/cmake/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -# cmake build artefact - -CMakeCache.txt -CMakeFiles -*.cmake -Makefile -liblz4.pc -lz4c -install_manifest.txt -build diff --git a/librocksdb-sys/lz4/build/cmake/CMakeLists.txt b/librocksdb-sys/lz4/build/cmake/CMakeLists.txt deleted file mode 100644 index eb7007b..0000000 --- a/librocksdb-sys/lz4/build/cmake/CMakeLists.txt +++ /dev/null @@ -1,273 +0,0 @@ -# CMake support for LZ4 -# -# To the extent possible under law, the author(s) have dedicated all -# copyright and related and neighboring rights to this software to -# the public domain worldwide. This software is distributed without -# any warranty. -# -# For details, see . -# -# LZ4's CMake support is maintained by Evan Nemerson; when filing -# bugs please mention @nemequ to make sure I see it. 
- -cmake_minimum_required(VERSION 2.8.12) - -set(LZ4_TOP_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../..") - -# Parse version information -file(STRINGS "${LZ4_TOP_SOURCE_DIR}/lib/lz4.h" LZ4_VERSION_MAJOR REGEX "^#define LZ4_VERSION_MAJOR +([0-9]+) +.*$") -string(REGEX REPLACE "^#define LZ4_VERSION_MAJOR +([0-9]+) +.*$" "\\1" LZ4_VERSION_MAJOR "${LZ4_VERSION_MAJOR}") -file(STRINGS "${LZ4_TOP_SOURCE_DIR}/lib/lz4.h" LZ4_VERSION_MINOR REGEX "^#define LZ4_VERSION_MINOR +([0-9]+) +.*$") -string(REGEX REPLACE "^#define LZ4_VERSION_MINOR +([0-9]+) +.*$" "\\1" LZ4_VERSION_MINOR "${LZ4_VERSION_MINOR}") -file(STRINGS "${LZ4_TOP_SOURCE_DIR}/lib/lz4.h" LZ4_VERSION_RELEASE REGEX "^#define LZ4_VERSION_RELEASE +([0-9]+) +.*$") -string(REGEX REPLACE "^#define LZ4_VERSION_RELEASE +([0-9]+) +.*$" "\\1" LZ4_VERSION_RELEASE "${LZ4_VERSION_RELEASE}") -set(LZ4_VERSION_STRING "${LZ4_VERSION_MAJOR}.${LZ4_VERSION_MINOR}.${LZ4_VERSION_RELEASE}") -mark_as_advanced(LZ4_VERSION_STRING LZ4_VERSION_MAJOR LZ4_VERSION_MINOR LZ4_VERSION_RELEASE) - -if("${CMAKE_VERSION}" VERSION_LESS "3.0") - project(LZ4 C) -else() - cmake_policy (SET CMP0048 NEW) - project(LZ4 - VERSION ${LZ4_VERSION_STRING} - LANGUAGES C) -endif() - -option(LZ4_BUILD_CLI "Build lz4 program" ON) -option(LZ4_BUILD_LEGACY_LZ4C "Build lz4c program with legacy argument support" ON) - -# If LZ4 is being bundled in another project, we don't want to -# install anything. However, we want to let people override this, so -# we'll use the LZ4_BUNDLED_MODE variable to let them do that; just -# set it to OFF in your project before you add_subdirectory(lz4/contrib/cmake_unofficial). -get_directory_property(LZ4_PARENT_DIRECTORY PARENT_DIRECTORY) -if("${LZ4_BUNDLED_MODE}" STREQUAL "") - # Bundled mode hasn't been set one way or the other, set the default - # depending on whether or not we are the top-level project. - if("${LZ4_PARENT_DIRECTORY}" STREQUAL "") - set(LZ4_BUNDLED_MODE OFF) - else() - set(LZ4_BUNDLED_MODE ON) - endif() -endif() -mark_as_advanced(LZ4_BUNDLED_MODE) - -# CPack -if(NOT LZ4_BUNDLED_MODE AND NOT CPack_CMake_INCLUDED) - set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "LZ4 compression library") - set(CPACK_PACKAGE_DESCRIPTION_FILE "${LZ4_TOP_SOURCE_DIR}/README.md") - set(CPACK_RESOURCE_FILE_LICENSE "${LZ4_TOP_SOURCE_DIR}/LICENSE") - set(CPACK_PACKAGE_VERSION_MAJOR ${LZ4_VERSION_MAJOR}) - set(CPACK_PACKAGE_VERSION_MINOR ${LZ4_VERSION_MINOR}) - set(CPACK_PACKAGE_VERSION_PATCH ${LZ4_VERSION_RELEASE}) - include(CPack) -endif(NOT LZ4_BUNDLED_MODE AND NOT CPack_CMake_INCLUDED) - -# Allow people to choose whether to build shared or static libraries -# via the BUILD_SHARED_LIBS option unless we are in bundled mode, in -# which case we always use static libraries. 
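The bundled-mode and shared/static comments above describe how this CMake script is meant to be consumed from a parent project. Below is a minimal sketch of such a parent CMakeLists.txt, for illustration only: the vendored path `third_party/lz4/build/cmake`, the `myapp` target and `main.c` are assumed placeholders, while `LZ4_BUNDLED_MODE`, `LZ4_BUILD_CLI`, `LZ4_BUILD_LEGACY_LZ4C` and the `lz4_static` target are names defined by the script above.

```cmake
cmake_minimum_required(VERSION 3.5)
project(myapp C)

# LZ4_BUNDLED_MODE defaults to ON when this script is pulled in via
# add_subdirectory(); set it to OFF instead if LZ4's install rules should run.
set(LZ4_BUNDLED_MODE ON)
# Skip the lz4/lz4c command-line tools; only the library is wanted here.
set(LZ4_BUILD_CLI OFF CACHE BOOL "" FORCE)
set(LZ4_BUILD_LEGACY_LZ4C OFF CACHE BOOL "" FORCE)

add_subdirectory(third_party/lz4/build/cmake)

add_executable(myapp main.c)
# In bundled mode BUILD_SHARED_LIBS is forced OFF and BUILD_STATIC_LIBS ON,
# so the static library target is the one to link against.
target_link_libraries(myapp PRIVATE lz4_static)
```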
-include(CMakeDependentOption) -CMAKE_DEPENDENT_OPTION(BUILD_SHARED_LIBS "Build shared libraries" ON "NOT LZ4_BUNDLED_MODE" OFF) -CMAKE_DEPENDENT_OPTION(BUILD_STATIC_LIBS "Build static libraries" OFF "BUILD_SHARED_LIBS" ON) - -if(NOT BUILD_SHARED_LIBS AND NOT BUILD_STATIC_LIBS) - message(FATAL_ERROR "Both BUILD_SHARED_LIBS and BUILD_STATIC_LIBS have been disabled") -endif() - -set(LZ4_LIB_SOURCE_DIR "${LZ4_TOP_SOURCE_DIR}/lib") -set(LZ4_PROG_SOURCE_DIR "${LZ4_TOP_SOURCE_DIR}/programs") - -include_directories("${LZ4_LIB_SOURCE_DIR}") - -# CLI sources -set(LZ4_SOURCES - "${LZ4_LIB_SOURCE_DIR}/lz4.c" - "${LZ4_LIB_SOURCE_DIR}/lz4hc.c" - "${LZ4_LIB_SOURCE_DIR}/lz4.h" - "${LZ4_LIB_SOURCE_DIR}/lz4hc.h" - "${LZ4_LIB_SOURCE_DIR}/lz4frame.c" - "${LZ4_LIB_SOURCE_DIR}/lz4frame.h" - "${LZ4_LIB_SOURCE_DIR}/xxhash.c") -set(LZ4_CLI_SOURCES - "${LZ4_PROG_SOURCE_DIR}/bench.c" - "${LZ4_PROG_SOURCE_DIR}/lz4cli.c" - "${LZ4_PROG_SOURCE_DIR}/lz4io.c" - "${LZ4_PROG_SOURCE_DIR}/datagen.c") - -# Whether to use position independent code for the static library. If -# we're building a shared library this is ignored and PIC is always -# used. -option(LZ4_POSITION_INDEPENDENT_LIB "Use position independent code for static library (if applicable)" ON) - -# liblz4 -set(LZ4_LIBRARIES_BUILT) -if(BUILD_SHARED_LIBS) - add_library(lz4_shared SHARED ${LZ4_SOURCES}) - target_include_directories(lz4_shared - PUBLIC $ - INTERFACE $) - set_target_properties(lz4_shared PROPERTIES - OUTPUT_NAME lz4 - SOVERSION "${LZ4_VERSION_MAJOR}" - VERSION "${LZ4_VERSION_STRING}") - if(MSVC) - target_compile_definitions(lz4_shared PRIVATE - LZ4_DLL_EXPORT=1) - endif() - list(APPEND LZ4_LIBRARIES_BUILT lz4_shared) -endif() -if(BUILD_STATIC_LIBS) - set(STATIC_LIB_NAME lz4) - if (MSVC AND BUILD_SHARED_LIBS) - set(STATIC_LIB_NAME lz4_static) - endif() - add_library(lz4_static STATIC ${LZ4_SOURCES}) - target_include_directories(lz4_static - PUBLIC $ - INTERFACE $) - set_target_properties(lz4_static PROPERTIES - OUTPUT_NAME ${STATIC_LIB_NAME} - POSITION_INDEPENDENT_CODE ${LZ4_POSITION_INDEPENDENT_LIB}) - list(APPEND LZ4_LIBRARIES_BUILT lz4_static) -endif() - -if(BUILD_STATIC_LIBS) - set(LZ4_LINK_LIBRARY lz4_static) -else() - list(APPEND LZ4_CLI_SOURCES ${LZ4_SOURCES}) -endif() - -# lz4 -if (LZ4_BUILD_CLI) - set(LZ4_PROGRAMS_BUILT lz4cli) - add_executable(lz4cli ${LZ4_CLI_SOURCES}) - set_target_properties(lz4cli PROPERTIES OUTPUT_NAME lz4) - if (BUILD_STATIC_LIBS) - target_link_libraries(lz4cli ${LZ4_LINK_LIBRARY}) - endif() -endif() - -# lz4c -if (LZ4_BUILD_LEGACY_LZ4C) - list(APPEND LZ4_PROGRAMS_BUILT lz4c) - add_executable(lz4c ${LZ4_CLI_SOURCES}) - set_target_properties(lz4c PROPERTIES COMPILE_DEFINITIONS "ENABLE_LZ4C_LEGACY_OPTIONS") - if (BUILD_STATIC_LIBS) - target_link_libraries(lz4c ${LZ4_LINK_LIBRARY}) - endif() -endif() - -# Extra warning flags -include (CheckCCompilerFlag) -foreach (flag - # GCC-style - -Wall - -Wextra - -Wundef - -Wcast-qual - -Wcast-align - -Wshadow - -Wswitch-enum - -Wdeclaration-after-statement - -Wstrict-prototypes - -Wpointer-arith - - # MSVC-style - /W4) - # Because https://gcc.gnu.org/wiki/FAQ#wnowarning - string(REGEX REPLACE "\\-Wno\\-(.+)" "-W\\1" flag_to_test "${flag}") - string(REGEX REPLACE "[^a-zA-Z0-9]+" "_" test_name "CFLAG_${flag_to_test}") - - check_c_compiler_flag("${ADD_COMPILER_FLAGS_PREPEND} ${flag_to_test}" ${test_name}) - - if(${test_name}) - set(CMAKE_C_FLAGS "${flag} ${CMAKE_C_FLAGS}") - endif() - - unset(test_name) - unset(flag_to_test) -endforeach (flag) - -if(NOT LZ4_BUNDLED_MODE) - 
include(GNUInstallDirs) - - install(TARGETS ${LZ4_PROGRAMS_BUILT} - BUNDLE DESTINATION "${CMAKE_INSTALL_BINDIR}" - RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}") - install(TARGETS ${LZ4_LIBRARIES_BUILT} - EXPORT lz4Targets - LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}" - ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}" - RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}") - install(FILES - "${LZ4_LIB_SOURCE_DIR}/lz4.h" - "${LZ4_LIB_SOURCE_DIR}/lz4frame.h" - "${LZ4_LIB_SOURCE_DIR}/lz4hc.h" - DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}") - install(FILES "${LZ4_PROG_SOURCE_DIR}/lz4.1" - DESTINATION "${CMAKE_INSTALL_MANDIR}/man1") - install(FILES "${CMAKE_CURRENT_BINARY_DIR}/liblz4.pc" - DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig") - - include(CMakePackageConfigHelpers) - write_basic_package_version_file( - "${CMAKE_CURRENT_BINARY_DIR}/lz4ConfigVersion.cmake" - VERSION ${LZ4_VERSION_STRING} - COMPATIBILITY SameMajorVersion) - - set(LZ4_PKG_INSTALLDIR "${CMAKE_INSTALL_LIBDIR}/cmake/lz4") - configure_package_config_file( - "${CMAKE_CURRENT_LIST_DIR}/lz4Config.cmake.in" - "${CMAKE_CURRENT_BINARY_DIR}/lz4Config.cmake" - INSTALL_DESTINATION ${LZ4_PKG_INSTALLDIR}) - export(EXPORT lz4Targets - FILE ${CMAKE_CURRENT_BINARY_DIR}/lz4Targets.cmake - NAMESPACE LZ4::) - - install(EXPORT lz4Targets - FILE lz4Targets.cmake - NAMESPACE LZ4:: - DESTINATION ${LZ4_PKG_INSTALLDIR}) - install(FILES - ${CMAKE_CURRENT_BINARY_DIR}/lz4Config.cmake - ${CMAKE_CURRENT_BINARY_DIR}/lz4ConfigVersion.cmake - DESTINATION ${LZ4_PKG_INSTALLDIR}) - - # install lz4cat and unlz4 symlinks on *nix - if(UNIX AND LZ4_BUILD_CLI) - install(CODE " - foreach(f lz4cat unlz4) - set(dest \"\$ENV{DESTDIR}${CMAKE_INSTALL_FULL_BINDIR}/\${f}\") - message(STATUS \"Symlinking: \${dest} -> lz4\") - execute_process( - COMMAND \"${CMAKE_COMMAND}\" -E create_symlink lz4 \"\${dest}\") - endforeach() - ") - - # create manpage aliases - foreach(f lz4cat unlz4) - file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/${f}.1" ".so man1/lz4.1\n") - install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${f}.1" - DESTINATION "${CMAKE_INSTALL_MANDIR}/man1") - endforeach() - endif(UNIX AND LZ4_BUILD_CLI) -endif(NOT LZ4_BUNDLED_MODE) - -# pkg-config -set(PREFIX "${CMAKE_INSTALL_PREFIX}") - -if("${CMAKE_INSTALL_FULL_LIBDIR}" STREQUAL "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") - set(LIBDIR "\${prefix}/${CMAKE_INSTALL_LIBDIR}") -else() - set(LIBDIR "${CMAKE_INSTALL_FULL_LIBDIR}") -endif() - -if("${CMAKE_INSTALL_FULL_INCLUDEDIR}" STREQUAL "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}") - set(INCLUDEDIR "\${prefix}/${CMAKE_INSTALL_INCLUDEDIR}") -else() - set(INCLUDEDIR "${CMAKE_INSTALL_FULL_INCLUDEDIR}") -endif() - -# for liblz4.pc substitution -set(VERSION ${LZ4_VERSION_STRING}) -configure_file(${LZ4_LIB_SOURCE_DIR}/liblz4.pc.in liblz4.pc @ONLY) diff --git a/librocksdb-sys/lz4/build/cmake/lz4Config.cmake.in b/librocksdb-sys/lz4/build/cmake/lz4Config.cmake.in deleted file mode 100644 index e9c9473..0000000 --- a/librocksdb-sys/lz4/build/cmake/lz4Config.cmake.in +++ /dev/null @@ -1,2 +0,0 @@ -@PACKAGE_INIT@ -include( "${CMAKE_CURRENT_LIST_DIR}/lz4Targets.cmake" ) \ No newline at end of file diff --git a/librocksdb-sys/lz4/contrib/debian/changelog b/librocksdb-sys/lz4/contrib/debian/changelog deleted file mode 100644 index 87ac016..0000000 --- a/librocksdb-sys/lz4/contrib/debian/changelog +++ /dev/null @@ -1,10 +0,0 @@ -liblz4 (1.7.2) unstable; urgency=low - - * Changed : moved to versioning; package, cli and library have same version number - * Improved: Small decompression speed boost 
(+4%) - * Improved: Performance on ARMv6 and ARMv7 - * Added : Debianization, by Evgeniy Polyakov - * Makefile: Generates object files (*.o) for faster (re)compilation on low power systems - * Fix : cli : crash on some invalid inputs - - -- Yann Collet Sun, 28 Jun 2015 01:00:00 +0000 diff --git a/librocksdb-sys/lz4/contrib/debian/compat b/librocksdb-sys/lz4/contrib/debian/compat deleted file mode 100644 index 7f8f011..0000000 --- a/librocksdb-sys/lz4/contrib/debian/compat +++ /dev/null @@ -1 +0,0 @@ -7 diff --git a/librocksdb-sys/lz4/contrib/debian/control b/librocksdb-sys/lz4/contrib/debian/control deleted file mode 100644 index ac3b460..0000000 --- a/librocksdb-sys/lz4/contrib/debian/control +++ /dev/null @@ -1,23 +0,0 @@ -Source: liblz4 -Section: devel -Priority: optional -Maintainer: Evgeniy Polyakov -Build-Depends: - cmake (>= 2.6), - debhelper (>= 7.0.50~), - cdbs -Standards-Version: 3.8.0 -Homepage: http://www.lz4.org/ -Vcs-Git: git://github.com/lz4/lz4.git -Vcs-Browser: https://github.com/lz4/lz4 - -Package: liblz4 -Architecture: any -Depends: ${shlibs:Depends}, ${misc:Depends} -Description: Extremely Fast Compression algorithm http://www.lz4.org - -Package: liblz4-dev -Architecture: any -Depends: ${shlibs:Depends}, ${misc:Depends} -Description: Extremely Fast Compression algorithm http://www.lz4.org - Development files. diff --git a/librocksdb-sys/lz4/contrib/debian/copyright b/librocksdb-sys/lz4/contrib/debian/copyright deleted file mode 100644 index 0914768..0000000 --- a/librocksdb-sys/lz4/contrib/debian/copyright +++ /dev/null @@ -1,9 +0,0 @@ -Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: liblz4 -Upstream-Contact: Yann Collet -Source: https://github.com/lz4/lz4 - -Files: * -Copyright: (C) 2011-2020 Yann Collet -License: GPL-2+ - The full text of license: https://github.com/lz4/lz4/blob/dev/lib/LICENSE diff --git a/librocksdb-sys/lz4/contrib/debian/dirs b/librocksdb-sys/lz4/contrib/debian/dirs deleted file mode 100644 index e772481..0000000 --- a/librocksdb-sys/lz4/contrib/debian/dirs +++ /dev/null @@ -1 +0,0 @@ -usr/bin diff --git a/librocksdb-sys/lz4/contrib/debian/docs b/librocksdb-sys/lz4/contrib/debian/docs deleted file mode 100644 index e69de29..0000000 diff --git a/librocksdb-sys/lz4/contrib/debian/liblz4-dev.install b/librocksdb-sys/lz4/contrib/debian/liblz4-dev.install deleted file mode 100644 index 3a02909..0000000 --- a/librocksdb-sys/lz4/contrib/debian/liblz4-dev.install +++ /dev/null @@ -1,2 +0,0 @@ -usr/include/lz4* -usr/lib/liblz4.so diff --git a/librocksdb-sys/lz4/contrib/debian/liblz4.install b/librocksdb-sys/lz4/contrib/debian/liblz4.install deleted file mode 100644 index e444956..0000000 --- a/librocksdb-sys/lz4/contrib/debian/liblz4.install +++ /dev/null @@ -1,2 +0,0 @@ -usr/lib/liblz4.so.* -usr/bin/* diff --git a/librocksdb-sys/lz4/contrib/debian/rules b/librocksdb-sys/lz4/contrib/debian/rules deleted file mode 100755 index c897bc5..0000000 --- a/librocksdb-sys/lz4/contrib/debian/rules +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/make -f - -include /usr/share/cdbs/1/rules/debhelper.mk -include /usr/share/cdbs/1/class/cmake.mk - - -DEB_CMAKE_EXTRA_FLAGS := -DCMAKE_BUILD_TYPE=RelWithDebInfo ../../build/cmake diff --git a/librocksdb-sys/lz4/contrib/djgpp/LICENSE b/librocksdb-sys/lz4/contrib/djgpp/LICENSE deleted file mode 100644 index fee0d3b..0000000 --- a/librocksdb-sys/lz4/contrib/djgpp/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2014, lpsantil -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/librocksdb-sys/lz4/contrib/djgpp/Makefile b/librocksdb-sys/lz4/contrib/djgpp/Makefile deleted file mode 100644 index 8cd3580..0000000 --- a/librocksdb-sys/lz4/contrib/djgpp/Makefile +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (c) 2015, Louis P. Santillan -# All rights reserved. -# See LICENSE for licensing details. -DESTDIR ?= /opt/local - -# Pulled the code below from lib/Makefile. Might be nicer to derive this somehow without sed -# Version numbers -VERSION ?= 129 -RELEASE ?= r$(VERSION) -LIBVER_MAJOR=$(shell sed -n '/define LZ4_VERSION_MAJOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < lib/lz4.h) -LIBVER_MINOR=$(shell sed -n '/define LZ4_VERSION_MINOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < lib/lz4.h) -LIBVER_PATCH=$(shell sed -n '/define LZ4_VERSION_RELEASE/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < lib/lz4.h) -LIBVER=$(LIBVER_MAJOR).$(LIBVER_MINOR).$(LIBVER_PATCH) - -###################################################################### - -CROSS ?= i586-pc-msdosdjgpp -CC = $(CROSS)-gcc -AR = $(CROSS)-ar -LD = $(CROSS)-gcc - -CFLAGS ?= -O3 -std=gnu99 -Wall -Wextra -Wundef -Wshadow -Wcast-qual -Wcast-align -Wstrict-prototypes -pedantic -DLZ4_VERSION=\"$(RELEASE)\" -LDFLAGS ?= -s -SRC = programs/bench.c programs/lz4io.c programs/lz4cli.c -OBJ = $(SRC:.c=.o) -SDEPS = $(SRC:.c=.d) -IDIR = lib -EDIR = . 
-EXE = lz4.exe -LNK = lz4 -LDIR = lib -LSRC = lib/lz4.c lib/lz4hc.c lib/lz4frame.c lib/xxhash.c -INC = $(LSRC:.c=.h) -LOBJ = $(LSRC:.c=.o) -LSDEPS = $(LSRC:.c=.d) -LIB = $(LDIR)/lib$(LNK).a - -# Since LDFLAGS defaults to "-s", probably better to override unless -# you have a default you would like to maintain -ifeq ($(WITH_DEBUG), 1) - CFLAGS += -g - LDFLAGS += -g -endif - -# Since LDFLAGS defaults to "-s", probably better to override unless -# you have a default you would like to maintain -ifeq ($(WITH_PROFILING), 1) - CFLAGS += -pg - LDFLAGS += -pg -endif - -%.o: %.c $(INC) Makefile - $(CC) $(CFLAGS) -MMD -MP -I$(IDIR) -c $< -o $@ - -%.exe: %.o $(LIB) Makefile - $(LD) $< -L$(LDIR) -l$(LNK) $(LDFLAGS) $(LIBDEP) -o $@ - -###################################################################### -######################## DO NOT MODIFY BELOW ######################### -###################################################################### - -.PHONY: all install uninstall showconfig gstat gpush - -all: $(LIB) $(EXE) - -$(LIB): $(LOBJ) - $(AR) -rcs $@ $^ - -$(EXE): $(LOBJ) $(OBJ) - $(LD) $(LDFLAGS) $(LOBJ) $(OBJ) -o $(EDIR)/$@ - -clean: - rm -f $(OBJ) $(EXE) $(LOBJ) $(LIB) *.tmp $(SDEPS) $(LSDEPS) $(TSDEPS) - -install: $(INC) $(LIB) $(EXE) - mkdir -p $(DESTDIR)/bin $(DESTDIR)/include $(DESTDIR)/lib - rm -f .footprint - echo $(DESTDIR)/bin/$(EXE) >> .footprint - cp -v $(EXE) $(DESTDIR)/bin/ - @for T in $(LIB); \ - do ( \ - echo $(DESTDIR)/$$T >> .footprint; \ - cp -v --parents $$T $(DESTDIR) \ - ); done - @for T in $(INC); \ - do ( \ - echo $(DESTDIR)/include/`basename -a $$T` >> .footprint; \ - cp -v $$T $(DESTDIR)/include/ \ - ); done - -uninstall: .footprint - @for T in $(shell cat .footprint); do rm -v $$T; done - --include $(SDEPS) $(LSDEPS) - -showconfig: - @echo "PWD="$(PWD) - @echo "VERSION="$(VERSION) - @echo "RELEASE="$(RELEASE) - @echo "LIBVER_MAJOR="$(LIBVER_MAJOR) - @echo "LIBVER_MINOR="$(LIBVER_MINOR) - @echo "LIBVER_PATCH="$(LIBVER_PATCH) - @echo "LIBVER="$(LIBVER) - @echo "CROSS="$(CROSS) - @echo "CC="$(CC) - @echo "AR="$(AR) - @echo "LD="$(LD) - @echo "DESTDIR="$(DESTDIR) - @echo "CFLAGS="$(CFLAGS) - @echo "LDFLAGS="$(LDFLAGS) - @echo "SRC="$(SRC) - @echo "OBJ="$(OBJ) - @echo "IDIR="$(IDIR) - @echo "INC="$(INC) - @echo "EDIR="$(EDIR) - @echo "EXE="$(EXE) - @echo "LDIR="$(LDIR) - @echo "LSRC="$(LSRC) - @echo "LOBJ="$(LOBJ) - @echo "LNK="$(LNK) - @echo "LIB="$(LIB) - @echo "SDEPS="$(SDEPS) - @echo "LSDEPS="$(LSDEPS) - -gstat: - git status - -gpush: - git commit - git push diff --git a/librocksdb-sys/lz4/contrib/djgpp/README.MD b/librocksdb-sys/lz4/contrib/djgpp/README.MD deleted file mode 100644 index 0f4cae6..0000000 --- a/librocksdb-sys/lz4/contrib/djgpp/README.MD +++ /dev/null @@ -1,21 +0,0 @@ -# lz4 for DOS/djgpp -This file details on how to compile lz4.exe, and liblz4.a for use on DOS/djgpp using -Andrew Wu's build-djgpp cross compilers ([GH][0], [Binaries][1]) on OSX, Linux. - -## Setup -* Download a djgpp tarball [binaries][1] for your platform. -* Extract and install it (`tar jxvf djgpp-linux64-gcc492.tar.bz2`). Note the path. We'll assume `/home/user/djgpp`. -* Add the `bin` folder to your `PATH`. In bash, do `export PATH=/home/user/djgpp/bin:$PATH`. -* The `Makefile` in `contrib/djgpp/` sets up `CC`, `AR`, `LD` for you. So, `CC=i586-pc-msdosdjgpp-gcc`, `AR=i586-pc-msdosdjgpp-ar`, `LD=i586-pc-msdosdjgpp-gcc`. 
- -## Building LZ4 for DOS -In the base dir of lz4 and with `contrib/djgpp/Makefile`, try: -Try: -* `make -f contrib/djgpp/Makefile` -* `make -f contrib/djgpp/Makefile liblz4.a` -* `make -f contrib/djgpp/Makefile lz4.exe` -* `make -f contrib/djgpp/Makefile DESTDIR=/home/user/dos install`, however it doesn't make much sense on a \*nix. -* You can also do `make -f contrib/djgpp/Makefile uninstall` - -[0]: https://github.com/andrewwutw/build-djgpp -[1]: https://github.com/andrewwutw/build-djgpp/releases diff --git a/librocksdb-sys/lz4/contrib/gen_manual/.gitignore b/librocksdb-sys/lz4/contrib/gen_manual/.gitignore deleted file mode 100644 index 6ea967f..0000000 --- a/librocksdb-sys/lz4/contrib/gen_manual/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# build artefact -gen_manual diff --git a/librocksdb-sys/lz4/contrib/gen_manual/Makefile b/librocksdb-sys/lz4/contrib/gen_manual/Makefile deleted file mode 100644 index 262c80d..0000000 --- a/librocksdb-sys/lz4/contrib/gen_manual/Makefile +++ /dev/null @@ -1,76 +0,0 @@ -# ################################################################ -# Copyright (C) Przemyslaw Skibinski 2016-present -# All rights reserved. -# -# BSD license -# Redistribution and use in source and binary forms, with or without modification, -# are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, this -# list of conditions and the following disclaimer in the documentation and/or -# other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-# -# You can contact the author at : -# - LZ4 source repository : https://github.com/Cyan4973/lz4 -# - LZ4 forum froup : https://groups.google.com/forum/#!forum/lz4c -# ################################################################ - - -CXXFLAGS ?= -O2 -CXXFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow -Wstrict-aliasing=1 -Wswitch-enum -Wno-comment -CPPFLAGS += $(MOREFLAGS) -FLAGS = $(CXXFLAGS) $(CPPFLAGS) $(LDFLAGS) - -LZ4API = ../../lib/lz4.h -LZ4MANUAL = ../../doc/lz4_manual.html -LZ4FAPI = ../../lib/lz4frame.h -LZ4FMANUAL = ../../doc/lz4frame_manual.html -LIBVER_MAJOR_SCRIPT:=`sed -n '/define LZ4_VERSION_MAJOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LZ4API)` -LIBVER_MINOR_SCRIPT:=`sed -n '/define LZ4_VERSION_MINOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LZ4API)` -LIBVER_PATCH_SCRIPT:=`sed -n '/define LZ4_VERSION_RELEASE/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LZ4API)` -LIBVER_SCRIPT:= $(LIBVER_MAJOR_SCRIPT).$(LIBVER_MINOR_SCRIPT).$(LIBVER_PATCH_SCRIPT) -LZ4VER := $(shell echo $(LIBVER_SCRIPT)) - -# Define *.exe as extension for Windows systems -ifneq (,$(filter Windows%,$(OS))) -EXT =.exe -else -EXT = -endif - - -.PHONY: default -default: gen_manual - -gen_manual: gen_manual.cpp - $(CXX) $(FLAGS) $^ -o $@$(EXT) - -$(LZ4MANUAL) : gen_manual $(LZ4API) - echo "Update lz4 manual in /doc" - ./gen_manual $(LZ4VER) $(LZ4API) $@ - -$(LZ4FMANUAL) : gen_manual $(LZ4FAPI) - echo "Update lz4frame manual in /doc" - ./gen_manual $(LZ4VER) $(LZ4FAPI) $@ - -.PHONY: manuals -manuals: $(LZ4MANUAL) $(LZ4FMANUAL) - -.PHONY: clean -clean: - @$(RM) gen_manual$(EXT) - @echo Cleaning completed diff --git a/librocksdb-sys/lz4/contrib/gen_manual/README.md b/librocksdb-sys/lz4/contrib/gen_manual/README.md deleted file mode 100644 index 7664ac6..0000000 --- a/librocksdb-sys/lz4/contrib/gen_manual/README.md +++ /dev/null @@ -1,31 +0,0 @@ -gen_manual - a program for automatic generation of manual from source code -========================================================================== - -#### Introduction - -This simple C++ program generates a single-page HTML manual from `lz4.h`. - -The format of recognized comment blocks is following: -- comments of type `/*!` mean: this is a function declaration; switch comments with declarations -- comments of type `/**` and `/*-` mean: this is a comment; use a `
<H2>` header for the first line -- comments of type `/*=` and `/**=` mean: use a `<H1>
` header and show also all functions until first empty line -- comments of type `/*X` where `X` is different from above-mentioned are ignored - -Moreover: -- `LZ4LIB_API` is removed to improve readability -- `typedef` are detected and included even if uncommented -- comments of type `/**<` and `/*!<` are detected and only function declaration is highlighted (bold) - - -#### Usage - -The program requires 3 parameters: -``` -gen_manual [lz4_version] [input_file] [output_html] -``` - -To compile program and generate lz4 manual we have used: -``` -make -./gen_manual.exe 1.7.3 ../../lib/lz4.h lz4_manual.html -``` diff --git a/librocksdb-sys/lz4/contrib/gen_manual/gen-lz4-manual.sh b/librocksdb-sys/lz4/contrib/gen_manual/gen-lz4-manual.sh deleted file mode 100644 index 73a7214..0000000 --- a/librocksdb-sys/lz4/contrib/gen_manual/gen-lz4-manual.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -LIBVER_MAJOR_SCRIPT=`sed -n '/define LZ4_VERSION_MAJOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < ../../lib/lz4.h` -LIBVER_MINOR_SCRIPT=`sed -n '/define LZ4_VERSION_MINOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < ../../lib/lz4.h` -LIBVER_PATCH_SCRIPT=`sed -n '/define LZ4_VERSION_RELEASE/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < ../../lib/lz4.h` -LIBVER_SCRIPT=$LIBVER_MAJOR_SCRIPT.$LIBVER_MINOR_SCRIPT.$LIBVER_PATCH_SCRIPT - -echo LZ4_VERSION=$LIBVER_SCRIPT -./gen_manual "lz4 $LIBVER_SCRIPT" ../../lib/lz4.h ./lz4_manual.html -./gen_manual "lz4frame $LIBVER_SCRIPT" ../../lib/lz4frame.h ./lz4frame_manual.html diff --git a/librocksdb-sys/lz4/contrib/gen_manual/gen_manual.cpp b/librocksdb-sys/lz4/contrib/gen_manual/gen_manual.cpp deleted file mode 100644 index d5fe702..0000000 --- a/librocksdb-sys/lz4/contrib/gen_manual/gen_manual.cpp +++ /dev/null @@ -1,248 +0,0 @@ -/* -Copyright (c) 2016-present, Przemyslaw Skibinski -All rights reserved. - -BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -* Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -You can contact the author at : -- LZ4 homepage : http://www.lz4.org -- LZ4 source repository : https://github.com/lz4/lz4 -*/ - -#include -#include -#include -#include -using namespace std; - - -/* trim string at the beginning and at the end */ -void trim(string& s, string characters) -{ - size_t p = s.find_first_not_of(characters); - s.erase(0, p); - - p = s.find_last_not_of(characters); - if (string::npos != p) - s.erase(p+1); -} - - -/* trim C++ style comments */ -void trim_comments(string &s) -{ - size_t spos, epos; - - spos = s.find("/*"); - epos = s.find("*/"); - s = s.substr(spos+3, epos-(spos+3)); -} - - -/* get lines until a given terminator */ -vector get_lines(vector& input, int& linenum, string terminator) -{ - vector out; - string line; - - while ((size_t)linenum < input.size()) { - line = input[linenum]; - - if (terminator.empty() && line.empty()) { linenum--; break; } - - size_t const epos = line.find(terminator); - if (!terminator.empty() && epos!=string::npos) { - out.push_back(line); - break; - } - out.push_back(line); - linenum++; - } - return out; -} - - -/* print line with LZ4LIB_API removed and C++ comments not bold */ -void print_line(stringstream &sout, string line) -{ - size_t spos, epos; - - if (line.substr(0,11) == "LZ4LIB_API ") line = line.substr(11); - if (line.substr(0,12) == "LZ4FLIB_API ") line = line.substr(12); - spos = line.find("/*"); - epos = line.find("*/"); - if (spos!=string::npos && epos!=string::npos) { - sout << line.substr(0, spos); - sout << "" << line.substr(spos) << "" << '\n'; - } else { - sout << line << '\n'; - } -} - - -int main(int argc, char *argv[]) { - char exclam; - int linenum, chapter = 1; - vector input, lines, comments, chapters; - string line, version; - size_t spos, l; - stringstream sout; - ifstream istream; - ofstream ostream; - - if (argc < 4) { - cout << "usage: " << argv[0] << " [lz4_version] [input_file] [output_html]" << endl; - return 1; - } - - version = string(argv[1]) + " Manual"; - - istream.open(argv[2], ifstream::in); - if (!istream.is_open()) { - cout << "Error opening file " << argv[2] << endl; - return 1; - } - - ostream.open(argv[3], ifstream::out); - if (!ostream.is_open()) { - cout << "Error opening file " << argv[3] << endl; - return 1; - } - - while (getline(istream, line)) { - input.push_back(line); - } - - for (linenum=0; (size_t)linenum < input.size(); linenum++) { - line = input[linenum]; - - /* typedefs are detected and included even if uncommented */ - if (line.substr(0,7) == "typedef" && line.find("{")!=string::npos) { - lines = get_lines(input, linenum, "}"); - sout << "
";
-            for (l=0; l

" << endl; - continue; - } - - /* comments of type / * * < and / * ! < are detected, and only function declaration is highlighted (bold) */ - if ((line.find("/**<")!=string::npos || line.find("/*!<")!=string::npos) - && line.find("*/")!=string::npos) { - sout << "
";
-            print_line(sout, line);
-            sout << "

" << endl; - continue; - } - - spos = line.find("/**="); - if (spos==string::npos) { - spos = line.find("/*!"); - if (spos==string::npos) - spos = line.find("/**"); - if (spos==string::npos) - spos = line.find("/*-"); - if (spos==string::npos) - spos = line.find("/*="); - if (spos==string::npos) - continue; - exclam = line[spos+2]; - } - else exclam = '='; - - comments = get_lines(input, linenum, "*/"); - if (!comments.empty()) comments[0] = line.substr(spos+3); - if (!comments.empty()) - comments[comments.size()-1] = comments[comments.size()-1].substr(0, comments[comments.size()-1].find("*/")); - for (l=0; l"; - for (l=0; l

"; - for (l=0; l
" << endl << endl; - } else if (exclam == '=') { /* comments of type / * = and / * * = mean: use a

header and show also all functions until first empty line */ - trim(comments[0], " "); - sout << "

" << comments[0] << "

";
-            for (l=1; l
";
-            lines = get_lines(input, ++linenum, "");
-            for (l=0; l
" << endl; - } else { /* comments of type / * * and / * - mean: this is a comment; use a

header for the first line */ - if (comments.empty()) continue; - - trim(comments[0], " "); - sout << "

" << comments[0] << "

";
-            chapters.push_back(comments[0]);
-            chapter++;
-
-            for (l=1; l 1)
-                sout << "
" << endl << endl; - else - sout << "
" << endl << endl; - } - } - - ostream << "\n\n\n" << version << "\n\n" << endl; - ostream << "

" << version << "

\n"; - - ostream << "
\n

Contents

\n
    \n"; - for (size_t i=0; i" << chapters[i].c_str() << "\n"; - ostream << "
\n
\n"; - - ostream << sout.str(); - ostream << "" << endl << "" << endl; - - return 0; -} diff --git a/librocksdb-sys/lz4/contrib/meson/README.md b/librocksdb-sys/lz4/contrib/meson/README.md deleted file mode 100644 index 1dc1bd9..0000000 --- a/librocksdb-sys/lz4/contrib/meson/README.md +++ /dev/null @@ -1,34 +0,0 @@ -Meson build system for lz4 -========================== - -Meson is a build system designed to optimize programmer productivity. -It aims to do this by providing simple, out-of-the-box support for -modern software development tools and practices, such as unit tests, -coverage reports, Valgrind, CCache and the like. - -This Meson build system is provided with no guarantee. - -## How to build - -`cd` to this meson directory (`contrib/meson`) - -```sh -meson setup --buildtype=release -Ddefault_library=shared -Dprograms=true builddir -cd builddir -ninja # to build -ninja install # to install -``` - -You might want to install it in staging directory: - -```sh -DESTDIR=./staging ninja install -``` - -To configure build options, use: - -```sh -meson configure -``` - -See [man meson(1)](https://manpages.debian.org/testing/meson/meson.1.en.html). diff --git a/librocksdb-sys/lz4/contrib/meson/meson.build b/librocksdb-sys/lz4/contrib/meson/meson.build deleted file mode 100644 index 39672c8..0000000 --- a/librocksdb-sys/lz4/contrib/meson/meson.build +++ /dev/null @@ -1,27 +0,0 @@ -# ############################################################################# -# Copyright (c) 2018-present lzutao -# All rights reserved. -# -# This source code is licensed under both the BSD-style license (found in the -# LICENSE file in the root directory of this source tree) and the GPLv2 (found -# in the COPYING file in the root directory of this source tree). -# ############################################################################# - -# This is a dummy meson file. -# The intention is that it can be easily moved to the root of the project -# (together with meson_options.txt) and packaged for wrapdb. - -project( - 'lz4', - ['c'], - license: 'BSD-2-Clause-Patent AND GPL-2.0-or-later', - default_options: [ - 'c_std=c99', - 'buildtype=release', - 'warning_level=3' - ], - version: 'DUMMY', - meson_version: '>=0.49.0' -) - -subdir('meson') diff --git a/librocksdb-sys/lz4/contrib/meson/meson/GetLz4LibraryVersion.py b/librocksdb-sys/lz4/contrib/meson/meson/GetLz4LibraryVersion.py deleted file mode 100644 index d8abfcb..0000000 --- a/librocksdb-sys/lz4/contrib/meson/meson/GetLz4LibraryVersion.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python3 -# ############################################################################# -# Copyright (c) 2018-present lzutao -# All rights reserved. -# -# This source code is licensed under both the BSD-style license (found in the -# LICENSE file in the root directory of this source tree) and the GPLv2 (found -# in the COPYING file in the root directory of this source tree). 
-# ############################################################################# -import re - - -def find_version_tuple(filepath): - version_file_data = None - with open(filepath) as fd: - version_file_data = fd.read() - - patterns = r"""#\s*define\s+LZ4_VERSION_MAJOR\s+([0-9]+).*$ -#\s*define\s+LZ4_VERSION_MINOR\s+([0-9]+).*$ -#\s*define\s+LZ4_VERSION_RELEASE\s+([0-9]+).*$ -""" - regex = re.compile(patterns, re.MULTILINE) - version_match = regex.search(version_file_data) - if version_match: - return version_match.groups() - raise Exception("Unable to find version string.") - - -def main(): - import argparse - parser = argparse.ArgumentParser(description='Print lz4 version from lib/lz4.h') - parser.add_argument('file', help='path to lib/lz4.h') - args = parser.parse_args() - version_tuple = find_version_tuple(args.file) - print('.'.join(version_tuple)) - - -if __name__ == '__main__': - main() diff --git a/librocksdb-sys/lz4/contrib/meson/meson/contrib/gen_manual/meson.build b/librocksdb-sys/lz4/contrib/meson/meson/contrib/gen_manual/meson.build deleted file mode 100644 index 84a95a9..0000000 --- a/librocksdb-sys/lz4/contrib/meson/meson/contrib/gen_manual/meson.build +++ /dev/null @@ -1,42 +0,0 @@ -# ############################################################################# -# Copyright (c) 2018-present lzutao -# Copyright (c) 2022-present Tristan Partin -# All rights reserved. -# -# This source code is licensed under both the BSD-style license (found in the -# LICENSE file in the root directory of this source tree) and the GPLv2 (found -# in the COPYING file in the root directory of this source tree). -# ############################################################################# - -lz4_source_root = '../../../../..' - -add_languages('cpp') - -sources = files( - lz4_source_root / 'contrib/gen_manual/gen_manual.cpp' -) - -gen_manual = executable( - 'gen_manual', - sources, - native: true, - install: false -) - -manual_pages = ['lz4', 'lz4frame'] - -foreach mp : manual_pages - custom_target( - '@0@_manual.html'.format(mp), - build_by_default: true, - input: lz4_source_root / 'lib/@0@.h'.format(mp), - output: '@0@_manual.html'.format(mp), - command: [ - gen_manual, - lz4_version, - '@INPUT@', - '@OUTPUT@', - ], - install: false - ) -endforeach diff --git a/librocksdb-sys/lz4/contrib/meson/meson/contrib/meson.build b/librocksdb-sys/lz4/contrib/meson/meson/contrib/meson.build deleted file mode 100644 index ef780fb..0000000 --- a/librocksdb-sys/lz4/contrib/meson/meson/contrib/meson.build +++ /dev/null @@ -1,11 +0,0 @@ -# ############################################################################# -# Copyright (c) 2018-present lzutao -# Copyright (c) 2022-present Tristan Partin -# All rights reserved. -# -# This source code is licensed under both the BSD-style license (found in the -# LICENSE file in the root directory of this source tree) and the GPLv2 (found -# in the COPYING file in the root directory of this source tree). -# ############################################################################# - -subdir('gen_manual') diff --git a/librocksdb-sys/lz4/contrib/meson/meson/examples/meson.build b/librocksdb-sys/lz4/contrib/meson/meson/examples/meson.build deleted file mode 100644 index 65f54ca..0000000 --- a/librocksdb-sys/lz4/contrib/meson/meson/examples/meson.build +++ /dev/null @@ -1,32 +0,0 @@ -# ############################################################################# -# Copyright (c) 2018-present lzutao -# Copyright (c) 2022-present Tristan Partin -# All rights reserved. 
-# -# This source code is licensed under both the BSD-style license (found in the -# LICENSE file in the root directory of this source tree) and the GPLv2 (found -# in the COPYING file in the root directory of this source tree). -# ############################################################################# - -lz4_source_root = '../../../..' - -examples = { - 'printVersion': 'printVersion.c', - 'doubleBuffer': 'blockStreaming_doubleBuffer.c', - 'dictionaryRandomAccess': 'dictionaryRandomAccess.c', - 'ringBuffer': 'blockStreaming_ringBuffer.c', - 'ringBufferHC': 'HCStreaming_ringBuffer.c', - 'lineCompress': 'blockStreaming_lineByLine.c', - 'frameCompress': 'frameCompress.c', - 'compressFunctions': 'compress_functions.c', - 'simpleBuffer': 'simple_buffer.c', -} - -foreach e, src : examples - executable( - e, - lz4_source_root / 'examples' / src, - dependencies: [liblz4_internal_dep], - install: false - ) -endforeach diff --git a/librocksdb-sys/lz4/contrib/meson/meson/lib/meson.build b/librocksdb-sys/lz4/contrib/meson/meson/lib/meson.build deleted file mode 100644 index 469cd09..0000000 --- a/librocksdb-sys/lz4/contrib/meson/meson/lib/meson.build +++ /dev/null @@ -1,76 +0,0 @@ -# ############################################################################# -# Copyright (c) 2018-present lzutao -# Copyright (c) 2022-present Tristan Partin -# All rights reserved. -# -# This source code is licensed under both the BSD-style license (found in the -# LICENSE file in the root directory of this source tree) and the GPLv2 (found -# in the COPYING file in the root directory of this source tree). -# ############################################################################# - -lz4_source_root = '../../../..' - -sources = files( - lz4_source_root / 'lib/lz4.c', - lz4_source_root / 'lib/lz4frame.c', - lz4_source_root / 'lib/lz4hc.c', - lz4_source_root / 'lib/xxhash.c' -) - -c_args = [] - -if host_machine.system() == 'windows' and get_option('default_library') != 'static' - c_args += '-DLZ4_DLL_EXPORT=1' -endif - -if get_option('unstable') - compile_args += '-DLZ4_STATIC_LINKING_ONLY' - if get_option('default_library') != 'static' - c_args += '-DLZ4_PUBLISH_STATIC_FUNCTIONS' - endif -endif - -liblz4 = library( - 'lz4', - sources, - install: true, - version: lz4_version, - gnu_symbol_visibility: 'hidden' -) - -liblz4_dep = declare_dependency( - link_with: liblz4, - include_directories: include_directories(lz4_source_root / 'lib') -) - -if get_option('tests') or get_option('programs') or get_option('examples') - liblz4_internal = static_library( - 'lz4-internal', - objects: liblz4.extract_all_objects(recursive: true), - gnu_symbol_visibility: 'hidden' - ) - - liblz4_internal_dep = declare_dependency( - link_with: liblz4_internal, - include_directories: include_directories(lz4_source_root / 'lib') - ) -endif - -pkgconfig.generate( - liblz4, - name: 'lz4', - filebase: 'liblz4', - description: 'extremely fast lossless compression algorithm library', - version: lz4_version, - url: 'http://www.lz4.org/' -) - -install_headers( - lz4_source_root / 'lib/lz4.h', - lz4_source_root / 'lib/lz4hc.h', - lz4_source_root / 'lib/lz4frame.h' -) - -if get_option('default_library') != 'shared' - install_headers(lz4_source_root / 'lib/lz4frame_static.h') -endif diff --git a/librocksdb-sys/lz4/contrib/meson/meson/meson.build b/librocksdb-sys/lz4/contrib/meson/meson/meson.build deleted file mode 100644 index 9e8b8c6..0000000 --- a/librocksdb-sys/lz4/contrib/meson/meson/meson.build +++ /dev/null @@ -1,67 +0,0 @@ -# 
############################################################################# -# Copyright (c) 2018-present lzutao -# Copyright (c) 2022-present Tristan Partin -# All rights reserved. -# -# This source code is licensed under both the BSD-style license (found in the -# LICENSE file in the root directory of this source tree) and the GPLv2 (found -# in the COPYING file in the root directory of this source tree). -# ############################################################################# - -cc = meson.get_compiler('c') - -pkgconfig = import('pkgconfig') - -lz4_source_root = '../../..' - -lz4_version = meson.project_version() - -lz4_h_file = lz4_source_root / 'lib/lz4.h' -GetLz4LibraryVersion_py = find_program('GetLz4LibraryVersion.py') -lz4_version = run_command(GetLz4LibraryVersion_py, lz4_h_file, check: true).stdout().strip() -message('Project version is now: @0@'.format(lz4_version)) - -add_project_arguments('-DXXH_NAMESPACE=LZ4_', language: 'c') - -if get_option('debug') - add_project_arguments(cc.get_supported_arguments([ - '-Wcast-qual', - '-Wcast-align', - '-Wshadow', - '-Wswitch-enum', - '-Wdeclaration-after-statement', - '-Wstrict-prototypes', - '-Wundef', - '-Wpointer-arith', - '-Wstrict-aliasing=1', - '-DLZ4_DEBUG=@0@'.format(get_option('debug-level')), - ] - ), - language: 'c', - ) -endif - -if get_option('memory-usage') > 0 - add_project_arguments( - '-DLZ4_MEMORY_USAGE=@0@'.format(get_option('memory-usage')), - language: 'c' - ) -endif - -subdir('lib') - -if get_option('programs') - subdir('programs') -endif - -if get_option('tests') - subdir('tests') -endif - -if get_option('contrib') - subdir('contrib') -endif - -if get_option('examples') - subdir('examples') -endif diff --git a/librocksdb-sys/lz4/contrib/meson/meson/programs/meson.build b/librocksdb-sys/lz4/contrib/meson/meson/programs/meson.build deleted file mode 100644 index f9d5bf1..0000000 --- a/librocksdb-sys/lz4/contrib/meson/meson/programs/meson.build +++ /dev/null @@ -1,44 +0,0 @@ -# ############################################################################# -# Copyright (c) 2018-present lzutao -# Copyright (c) 2022-present Tristan Partin -# All rights reserved. -# -# This source code is licensed under both the BSD-style license (found in the -# LICENSE file in the root directory of this source tree) and the GPLv2 (found -# in the COPYING file in the root directory of this source tree). -# ############################################################################# - -lz4_source_root = '../../../..' 
- -sources = files( - lz4_source_root / 'programs/bench.c', - lz4_source_root / 'programs/datagen.c', - lz4_source_root / 'programs/lz4cli.c', - lz4_source_root / 'programs/lz4io.c', -) - -lz4 = executable( - 'lz4', - sources, - include_directories: include_directories(lz4_source_root / 'programs'), - dependencies: [liblz4_internal_dep], - export_dynamic: get_option('debug') and host_machine.system() == 'windows', - install: true -) - -install_man(lz4_source_root / 'programs/lz4.1') - -if meson.version().version_compare('>=0.61.0') - foreach alias : ['lz4c', 'lz4cat', 'unlz4'] - install_symlink( - alias, - install_dir: get_option('bindir'), - pointing_to: 'lz4' - ) - install_symlink( - '@0@.1'.format(alias), - install_dir: get_option('mandir') / 'man1', - pointing_to: 'lz4.1' - ) - endforeach -endif diff --git a/librocksdb-sys/lz4/contrib/meson/meson/tests/meson.build b/librocksdb-sys/lz4/contrib/meson/meson/tests/meson.build deleted file mode 100644 index 18479e4..0000000 --- a/librocksdb-sys/lz4/contrib/meson/meson/tests/meson.build +++ /dev/null @@ -1,52 +0,0 @@ -# ############################################################################# -# Copyright (c) 2018-present lzutao -# Copyright (c) 2022-present Tristan Partin -# All rights reserved. -# -# This source code is licensed under both the BSD-style license (found in the -# LICENSE file in the root directory of this source tree) and the GPLv2 (found -# in the COPYING file in the root directory of this source tree). -# ############################################################################# - -lz4_source_root = '../../../..' - -exes = { - 'fullbench': { - 'sources': files(lz4_source_root / 'tests/fullbench.c'), - 'include_directories': include_directories(lz4_source_root / 'programs'), - }, - 'fuzzer': { - 'sources': files(lz4_source_root / 'tests/fuzzer.c'), - 'include_directories': include_directories(lz4_source_root / 'programs'), - }, - 'frametest': { - 'sources': files(lz4_source_root / 'tests/frametest.c'), - 'include_directories': include_directories(lz4_source_root / 'programs'), - }, - 'roundTripTest': { - 'sources': files(lz4_source_root / 'tests/roundTripTest.c'), - }, - 'datagen': { - 'sources': files(lz4_source_root / 'tests/datagencli.c'), - 'objects': lz4.extract_objects(lz4_source_root / 'programs/datagen.c'), - 'include_directories': include_directories(lz4_source_root / 'programs'), - }, - 'checkFrame': { - 'sources': files(lz4_source_root / 'tests/checkFrame.c'), - 'include_directories': include_directories(lz4_source_root / 'programs'), - }, - 'checkTag': { - 'sources': files(lz4_source_root / 'tests/checkTag.c'), - }, -} - -foreach e, attrs : exes - executable( - e, - attrs.get('sources'), - objects: attrs.get('objects', []), - dependencies: [liblz4_internal_dep], - include_directories: attrs.get('include_directories', []), - install: false - ) -endforeach diff --git a/librocksdb-sys/lz4/contrib/meson/meson_options.txt b/librocksdb-sys/lz4/contrib/meson/meson_options.txt deleted file mode 100644 index ccb32de..0000000 --- a/librocksdb-sys/lz4/contrib/meson/meson_options.txt +++ /dev/null @@ -1,24 +0,0 @@ -# ############################################################################# -# Copyright (c) 2018-present lzutao -# Copyright (c) 2022-present Tristan Partin -# All rights reserved. 
-# -# This source code is licensed under both the BSD-style license (found in the -# LICENSE file in the root directory of this source tree) and the GPLv2 (found -# in the COPYING file in the root directory of this source tree). -# ############################################################################# - -option('debug-level', type: 'integer', min: 0, max: 7, value: 1, - description: 'Enable run-time debug. See lib/lz4hc.c') -option('unstable', type: 'boolean', value: false, - description: 'Expose unstable interfaces') -option('programs', type: 'boolean', value: false, - description: 'Enable programs build') -option('tests', type: 'boolean', value: false, - description: 'Enable tests build') -option('contrib', type: 'boolean', value: false, - description: 'Enable contrib build') -option('examples', type: 'boolean', value: false, - description: 'Enable examples build') -option('memory-usage', type: 'integer', min: 0, value: 0, - description: 'See LZ4_MEMORY_USAGE. 0 means use the LZ4 default') diff --git a/librocksdb-sys/lz4/contrib/snap/README.md b/librocksdb-sys/lz4/contrib/snap/README.md deleted file mode 100644 index 55c97e0..0000000 --- a/librocksdb-sys/lz4/contrib/snap/README.md +++ /dev/null @@ -1,29 +0,0 @@ -Snap Packaging --------------- - -This directory contains the config required to generate a snap package -of lz4. Snaps are universal Linux packages that allow you to easily -build your application from any source and ship it to any Linux -distribution by publishing it to https://snapcraft.io/. A key attribute -of a snap package is that it is (ideally) confined such that it -executes within a controlled environment with all its dependencies -bundled with it and does not share dependencies with of from any other -package on the system (with a couple of minor exceptions). - -The basic anatomy and workflow is: - - * ensure snap.snapcraft.yaml is up-to-date e.g. with version info - - * build the snap by installing the snapcraft package and running it - - * push snap/* changes to the repo (excluding any crud generated by a build of course) - - * register yourself as owner of lz4 name in snapstore - - * publish new snap to the snap store - - * install snap by doing 'snap install lz4' on any Linux distro - - * all installed copies of lz4 will be automatically updated to your new version - -For more information on Snaps see https://docs.snapcraft.io and https://forum.snapcraft.io/ diff --git a/librocksdb-sys/lz4/contrib/snap/snapcraft.yaml b/librocksdb-sys/lz4/contrib/snap/snapcraft.yaml deleted file mode 100644 index 04ad3c4..0000000 --- a/librocksdb-sys/lz4/contrib/snap/snapcraft.yaml +++ /dev/null @@ -1,31 +0,0 @@ -name: lz4 -version: 1.9.3 -summary: Extremely Fast Compression algorithm -description: > - LZ4 is lossless compression algorithm, providing compression - speed > 500 MB/s per core, scalable with multi-cores CPU. It features an - extremely fast decoder, with speed in multiple GB/s per core, typically - reaching RAM speed limits on multi-core systems. - . - Speed can be tuned dynamically, selecting an "acceleration" factor which - trades compression ratio for faster speed. On the other end, a high - compression derivative, LZ4_HC, is also provided, trading CPU time for - improved compression ratio. All versions feature the same decompression - speed. - . - LZ4 is also compatible with dictionary compression, and can ingest any - input file as dictionary, including those created by Zstandard Dictionary - Builder. (note: only the final 64KB are used). - . 
- LZ4 library is provided as open-source software using BSD 2-Clause license. -confinement: strict -grade: stable - -apps: - lz4: - command: usr/local/bin/lz4 - plugs: [home] -parts: - lz4: - source: ../ - plugin: make diff --git a/librocksdb-sys/lz4/doc/lz4_Block_format.md b/librocksdb-sys/lz4/doc/lz4_Block_format.md deleted file mode 100644 index 9e80227..0000000 --- a/librocksdb-sys/lz4/doc/lz4_Block_format.md +++ /dev/null @@ -1,244 +0,0 @@ -LZ4 Block Format Description -============================ -Last revised: 2022-07-31 . -Author : Yann Collet - - -This specification is intended for developers willing to -produce or read LZ4 compressed data blocks -using any programming language of their choice. - -LZ4 is an LZ77-type compressor with a fixed byte-oriented encoding format. -There is no entropy encoder back-end nor framing layer. -The latter is assumed to be handled by other parts of the system -(see [LZ4 Frame format]). -This design is assumed to favor simplicity and speed. - -This document describes only the Block Format, -not how the compressor nor decompressor actually work. -For more details on such topics, see later section "Implementation Notes". - -[LZ4 Frame format]: lz4_Frame_format.md - - - -Compressed block format ------------------------ -An LZ4 compressed block is composed of sequences. -A sequence is a suite of literals (not-compressed bytes), -followed by a match copy operation. - -Each sequence starts with a `token`. -The `token` is a one byte value, separated into two 4-bits fields. -Therefore each field ranges from 0 to 15. - - -The first field uses the 4 high-bits of the token. -It provides the length of literals to follow. - -If the field value is smaller than 15, -then it represents the total nb of literals present in the sequence, -including 0, in which case there is no literal. - -The value 15 is a special case: more bytes are required to indicate the full length. -Each additional byte then represents a value from 0 to 255, -which is added to the previous value to produce a total length. -When the byte value is 255, another byte must be read and added, and so on. -There can be any number of bytes of value `255` following `token`. -The Block Format does not define any "size limit", -though real implementations may feature some practical limits -(see more details in later chapter "Implementation Notes"). - -Note : this format explains why a non-compressible input block is expanded by 0.4%. - -Example 1 : A literal length of 48 will be represented as : - - - 15 : value for the 4-bits High field - - 33 : (=48-15) remaining length to reach 48 - -Example 2 : A literal length of 280 will be represented as : - - - 15 : value for the 4-bits High field - - 255 : following byte is maxed, since 280-15 >= 255 - - 10 : (=280 - 15 - 255) remaining length to reach 280 - -Example 3 : A literal length of 15 will be represented as : - - - 15 : value for the 4-bits High field - - 0 : (=15-15) yes, the zero must be output - -Following `token` and optional length bytes, are the literals themselves. -They are exactly as numerous as just decoded (length of literals). -Reminder: it's possible that there are zero literals. - - -Following the literals is the match copy operation. - -It starts by the `offset` value. -This is a 2 bytes value, in little endian format -(the 1st byte is the "low" byte, the 2nd one is the "high" byte). - -The `offset` represents the position of the match to be copied from the past. -For example, 1 means "current position - 1 byte". 
-The maximum `offset` value is 65535. 65536 and beyond cannot be coded. -Note that 0 is an invalid `offset` value. -The presence of a 0 `offset` value denotes an invalid (corrupted) block. - -Then the `matchlength` can be extracted. -For this, we use the second `token` field, the low 4-bits. -Such a value, obviously, ranges from 0 to 15. -However here, 0 means that the copy operation is minimal. -The minimum length of a match, called `minmatch`, is 4. -As a consequence, a 0 value means 4 bytes. -Similarly to literal length, any value smaller than 15 represents a length, -to which 4 (`minmatch`) must be added, thus ranging from 4 to 18. -A value of 15 is special, meaning 19+ bytes, -to which one must read additional bytes, one at a time, -with each byte value ranging from 0 to 255. -They are added to total to provide the final match length. -A 255 value means there is another byte to read and add. -There is no limit to the number of optional `255` bytes that can be present, -and therefore no limit to representable match length, -though real-life implementations are likely going to enforce limits for practical reasons (see more details in "Implementation Notes" section below). - -Note: this format has a maximum achievable compression ratio of about ~250. - -Decoding the `matchlength` reaches the end of current sequence. -Next byte will be the start of another sequence, and therefore a new `token`. - - -End of block conditions -------------------------- -There are specific restrictions required to terminate an LZ4 block. - -1. The last sequence contains only literals. - The block ends right after the literals (no `offset` field). -2. The last 5 bytes of input are always literals. - Therefore, the last sequence contains at least 5 bytes. - - Special : if input is smaller than 5 bytes, - there is only one sequence, it contains the whole input as literals. - Even empty input can be represented, using a zero byte, - interpreted as a final token without literal and without a match. -3. The last match must start at least 12 bytes before the end of block. - The last match is part of the _penultimate_ sequence. - It is followed by the last sequence, which contains _only_ literals. - - Note that, as a consequence, - blocks < 12 bytes cannot be compressed. - And as an extension, _independent_ blocks < 13 bytes cannot be compressed, - because they must start by at least one literal, - that the match can then copy afterwards. - -When a block does not respect these end conditions, -a conformant decoder is allowed to reject the block as incorrect. - -These rules are in place to ensure compatibility with -a wide range of historical decoders -which rely on these conditions for their speed-oriented design. - -Implementation notes ------------------------ -The LZ4 Block Format only defines the compressed format, -it does not tell how to create a decoder or an encoder, -which design is left free to the imagination of the implementer. - -However, thanks to experience, there are a number of typical topics that -most implementations will have to consider. -This section tries to provide a few guidelines. - -#### Metadata - -An LZ4-compressed Block requires additional metadata for proper decoding. -Typically, a decoder will require the compressed block's size, -and an upper bound of decompressed size. -Other variants exist, such as knowing the decompressed size, -and having an upper bound of the input size. 
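To make the sequence layout described above concrete (token, literal length with 255-extension bytes, literals, 2-byte little-endian offset, match length plus the minmatch of 4), here is a hedged decoding sketch. It is not the reference decoder: the names are invented, and the bounds checks a safe decoder requires are reduced to asserts.

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Read a literal or match length: a 4-bit field of 15 means
 * "add following bytes until one of them is not 255". */
static size_t read_length(const uint8_t **ip, const uint8_t *iend, size_t base)
{
    size_t len = base;
    if (base == 15) {
        uint8_t b;
        do {
            assert(*ip < iend);
            b = *(*ip)++;
            len += b;
        } while (b == 255);
    }
    return len;
}

/* Decode one compressed block of srcSize bytes into dst; returns bytes written. */
static size_t lz4_block_decode_sketch(const uint8_t *src, size_t srcSize, uint8_t *dst)
{
    const uint8_t *ip = src, *iend = src + srcSize;
    uint8_t *op = dst;

    while (ip < iend) {
        uint8_t token = *ip++;
        size_t lit_len = read_length(&ip, iend, token >> 4);
        for (size_t i = 0; i < lit_len; i++) *op++ = *ip++;       /* literals */
        if (ip >= iend) break;                /* last sequence: literals only */

        size_t offset = (size_t)ip[0] | ((size_t)ip[1] << 8);     /* little endian */
        ip += 2;
        assert(offset != 0);                  /* offset 0 marks a corrupted block */

        size_t match_len = read_length(&ip, iend, token & 0x0F) + 4;  /* minmatch */
        const uint8_t *match = op - offset;
        for (size_t i = 0; i < match_len; i++) *op++ = match[i];  /* overlap-safe copy */
    }
    return (size_t)(op - dst);
}
```

As with any real decoder, the caller still has to supply the block's compressed size and a bound on the decompressed size, i.e. the out-of-band metadata discussed above.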
-The Block Format does not specify how to transmit such information, -which is considered an out-of-band information channel. -That's because in many cases, the information is present in the environment. -For example, databases must store the size of their compressed block for indexing, -and know that their decompressed block can't be larger than a certain threshold. - -If you need a format which is "self-contained", -and also transports the necessary metadata for proper decoding on any platform, -consider employing the [LZ4 Frame format] instead. - -#### Large lengths - -While the Block Format does not define any maximum value for length fields, -in practice, most implementations will feature some form of limit, -since it's expected for such values to be stored into registers of fixed bit width. - -If length fields use 64-bit registers, -then it can be assumed that there is no practical limit, -as it would require a single continuous block of multiple petabytes to reach it, -which is unreasonable by today's standard. - -If length fields use 32-bit registers, then it can be overflowed, -but requires a compressed block of size > 16 MB. -Therefore, implementations that do not deal with compressed blocks > 16 MB are safe. -However, if such a case is allowed, -then it's recommended to check that no large length overflows the register. - -If length fields use 16-bit registers, -then it's definitely possible to overflow such register, -with less than < 300 bytes of compressed data. - -A conformant decoder should be able to detect length overflows when it's possible, -and simply error out when that happens. -The input block might not be invalid, -it's just not decodable by the local decoder implementation. - -Note that, in order to be compatible with the larger LZ4 ecosystem, -it's recommended to be able to read and represent lengths of up to 4 MB, -and to accept blocks of size up to 4 MB. -Such limits are compatible with 32-bit length registers, -and prevent overflow of 32-bit registers. - -#### Safe decoding - -If a decoder receives compressed data from any external source, -it is recommended to ensure that the decoder is resilient to corrupted input, -and made safe from buffer overflow manipulations. -Always ensure that read and write operations -remain within the limits of provided buffers. - -Of particular importance, ensure that the nb of bytes instructed to copy -does not overflow neither the input nor the output buffers. -Ensure also, when reading an offset value, that the resulting position to copy -does not reach beyond the beginning of the buffer. -Such a situation can happen during the first 64 KB of decoded data. - -For more safety, test the decoder with fuzzers -to ensure it's resilient to improbable sequences of conditions. -Combine them with sanitizers, in order to catch overflows (asan) -or initialization issues (msan). - -Pay some attention to offset 0 scenario, which is invalid, -and therefore must not be blindly decoded: -a naive implementation could preserve destination buffer content, -which could then result in information disclosure -if such buffer was uninitialized and still containing private data. -For reference, in such a scenario, the reference LZ4 decoder -clears the match segment with `0` bytes, -though other solutions are certainly possible. - -Finally, pay attention to the "overlap match" scenario, -when `matchlength` is larger than `offset`. 
-In which case, since `match_pos + matchlength > current_pos`, -some of the later bytes to copy do not exist yet, -and will be generated during the early stage of match copy operation. -Such scenario must be handled with special care. -A common case is an offset of 1, -meaning the last byte is repeated `matchlength` times. - -#### Compression techniques - -The core of a LZ4 compressor is to detect duplicated data across past 64 KB. -The format makes no assumption nor limits to the way a compressor -searches and selects matches within the source data block. -For example, an upper compression limit can be reached, -using a technique called "full optimal parsing", at high cpu and memory cost. -But multiple other techniques can be considered, -featuring distinct time / performance trade-offs. -As long as the specified format is respected, -the result will be compatible with and decodable by any compliant decoder. diff --git a/librocksdb-sys/lz4/doc/lz4_Frame_format.md b/librocksdb-sys/lz4/doc/lz4_Frame_format.md deleted file mode 100644 index 97a2cbe..0000000 --- a/librocksdb-sys/lz4/doc/lz4_Frame_format.md +++ /dev/null @@ -1,432 +0,0 @@ -LZ4 Frame Format Description -============================ - -### Notices - -Copyright (c) 2013-2020 Yann Collet - -Permission is granted to copy and distribute this document -for any purpose and without charge, -including translations into other languages -and incorporation into compilations, -provided that the copyright notice and this notice are preserved, -and that any substantive changes or deletions from the original -are clearly marked. -Distribution of this document is unlimited. - -### Version - -1.6.2 (12/08/2020) - - -Introduction ------------- - -The purpose of this document is to define a lossless compressed data format, -that is independent of CPU type, operating system, -file system and character set, suitable for -File compression, Pipe and streaming compression -using the [LZ4 algorithm](http://www.lz4.org). - -The data can be produced or consumed, -even for an arbitrarily long sequentially presented input data stream, -using only an a priori bounded amount of intermediate storage, -and hence can be used in data communications. -The format uses the LZ4 compression method, -and optional [xxHash-32 checksum method](https://github.com/Cyan4973/xxHash), -for detection of data corruption. - -The data format defined by this specification -does not attempt to allow random access to compressed data. - -This specification is intended for use by implementers of software -to compress data into LZ4 format and/or decompress data from LZ4 format. -The text of the specification assumes a basic background in programming -at the level of bits and other primitive data representations. - -Unless otherwise indicated below, -a compliant compressor must produce data sets -that conform to the specifications presented here. -It doesn't need to support all options though. - -A compliant decompressor must be able to decompress -at least one working set of parameters -that conforms to the specifications presented here. -It may also ignore checksums. -Whenever it does not support a specific parameter within the compressed stream, -it must produce a non-ambiguous error code -and associated error message explaining which parameter is unsupported. - - -General Structure of LZ4 Frame format -------------------------------------- - -| MagicNb | F. Descriptor | Block | (...) | EndMark | C. 
Descriptor | Block | (...) | EndMark | C. Checksum | -|:-------:|:-------------:| ----- | ----- | ------- | ----------- | -| 4 bytes | 3-15 bytes | | | 4 bytes | 0-4 bytes | - -__Magic Number__ - -4 Bytes, Little endian format. -Value : 0x184D2204 - -__Frame Descriptor__ - -3 to 15 Bytes, to be detailed in its own paragraph, -as it is the most important part of the spec. - -The combined _Magic_Number_ and _Frame_Descriptor_ fields are sometimes -called ___LZ4 Frame Header___. Its size varies between 7 and 19 bytes. - -__Data Blocks__ - -To be detailed in its own paragraph. -That’s where compressed data is stored. - -__EndMark__ - -The flow of blocks ends when the last data block is followed by -the 32-bit value `0x00000000`. - -__Content Checksum__ - -_Content_Checksum_ verifies that the full content has been decoded correctly. -The content checksum is the result of [xxHash-32 algorithm] -digesting the original (decoded) data as input, and a seed of zero. -Content checksum is only present when its associated flag -is set in the frame descriptor. -Content Checksum validates the result, -that all blocks were fully transmitted in the correct order and without error, -and also that the encoding/decoding process itself generated no distortion. -Its usage is recommended. - -The combined _EndMark_ and _Content_Checksum_ fields might sometimes be -referred to as ___LZ4 Frame Footer___. Its size varies between 4 and 8 bytes. - -__Frame Concatenation__ - -In some circumstances, it may be preferable to append multiple frames, -for example in order to add new data to an existing compressed file -without re-framing it. - -In such case, each frame has its own set of descriptor flags. -Each frame is considered independent. -The only relation between frames is their sequential order. - -The ability to decode multiple concatenated frames -within a single stream or file -is left outside of this specification. -As an example, the reference lz4 command line utility behavior is -to decode all concatenated frames in their sequential order. - - -Frame Descriptor ---------------- - -| FLG | BD | (Content Size) | (Dictionary ID) | HC | -| ------- | ------- |:--------------:|:---------------:| ------- | -| 1 byte | 1 byte | 0 - 8 bytes | 0 - 4 bytes | 1 byte | - -The descriptor uses a minimum of 3 bytes, -and up to 15 bytes depending on optional parameters. - -__FLG byte__ - -| BitNb | 7-6 | 5 | 4 | 3 | 2 | 1 | 0 | -| ------- |-------|-------|----------|------|----------|----------|------| -|FieldName|Version|B.Indep|B.Checksum|C.Size|C.Checksum|*Reserved*|DictID| - - -__BD byte__ - -| BitNb | 7 | 6-5-4 | 3-2-1-0 | -| ------- | -------- | ------------- | -------- | -|FieldName|*Reserved*| Block MaxSize |*Reserved*| - -In the tables, bit 7 is highest bit, while bit 0 is lowest. - -__Version Number__ - -2-bits field, must be set to `01`. -Any other value cannot be decoded by this version of the specification. -Other version numbers will use different flag layouts. - -__Block Independence flag__ - -If this flag is set to “1”, blocks are independent. -If this flag is set to “0”, each block depends on previous ones -(up to LZ4 window size, which is 64 KB). -In such case, it’s necessary to decode all blocks in sequence. - -Block dependency improves compression ratio, especially for small blocks. -On the other hand, it makes random access or multi-threaded decoding impossible. - -__Block checksum flag__ - -If this flag is set, each data block will be followed by a 4-bytes checksum, -calculated by using the xxHash-32 algorithm on the raw (compressed) data block.
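As an illustration of the FLG and BD byte layouts in the tables above, the sketch below extracts each field and rejects frames whose version or reserved bits are invalid. This is not lz4's own parser; the struct and function names are made up for the example.

```c
#include <stdbool.h>
#include <stdint.h>

struct frame_flags {
    unsigned version;        /* FLG bits 7-6, must be binary 01 */
    bool block_independence; /* FLG bit 5 */
    bool block_checksum;     /* FLG bit 4 */
    bool content_size;       /* FLG bit 3 */
    bool content_checksum;   /* FLG bit 2 */
    bool dict_id;            /* FLG bit 0 */
    unsigned block_max_code; /* BD bits 6-5-4: 4=64 KB, 5=256 KB, 6=1 MB, 7=4 MB */
};

/* Returns 0 on success, -1 if the version or any reserved bit is invalid. */
static int parse_descriptor_flags(uint8_t flg, uint8_t bd, struct frame_flags *out)
{
    out->version            = (flg >> 6) & 0x03;
    out->block_independence = ((flg >> 5) & 0x01) != 0;
    out->block_checksum     = ((flg >> 4) & 0x01) != 0;
    out->content_size       = ((flg >> 3) & 0x01) != 0;
    out->content_checksum   = ((flg >> 2) & 0x01) != 0;
    out->dict_id            = (flg & 0x01) != 0;
    out->block_max_code     = (bd >> 4) & 0x07;

    if (out->version != 1)  return -1;   /* only version 01 is defined */
    if ((flg >> 1) & 0x01)  return -1;   /* FLG reserved bit must be 0 */
    if ((bd & 0x8F) != 0)   return -1;   /* BD reserved bits must be 0 */
    return 0;
}
```

The block-checksum bit decoded here is the per-block xxHash-32 flag just described.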
-The intention is to detect data corruption (storage or transmission errors) -immediately, before decoding. -Block checksum usage is optional. - -__Content Size flag__ - -If this flag is set, the uncompressed size of data included within the frame -will be present as an 8 bytes unsigned little endian value, after the flags. -Content Size usage is optional. - -__Content checksum flag__ - -If this flag is set, a 32-bits content checksum will be appended -after the EndMark. - -__Dictionary ID flag__ - -If this flag is set, a 4-bytes Dict-ID field will be present, -after the descriptor flags and the Content Size. - -__Block Maximum Size__ - -This information is useful to help the decoder allocate memory. -Size here refers to the original (uncompressed) data size. -Block Maximum Size is one value among the following table : - -| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | -| --- | --- | --- | --- | ----- | ------ | ---- | ---- | -| N/A | N/A | N/A | N/A | 64 KB | 256 KB | 1 MB | 4 MB | - -The decoder may refuse to allocate block sizes above any system-specific size. -Unused values may be used in a future revision of the spec. -A decoder conformant with the current version of the spec -is only able to decode block sizes defined in this spec. - -__Reserved bits__ - -Value of reserved bits **must** be 0 (zero). -Reserved bit might be used in a future version of the specification, -typically enabling new optional features. -When this happens, a decoder respecting the current specification version -shall not be able to decode such a frame. - -__Content Size__ - -This is the original (uncompressed) size. -This information is optional, and only present if the associated flag is set. -Content size is provided using unsigned 8 Bytes, for a maximum of 16 Exabytes. -Format is Little endian. -This value is informational, typically for display or memory allocation. -It can be skipped by a decoder, or used to validate content correctness. - -__Dictionary ID__ - -Dict-ID is only present if the associated flag is set. -It's an unsigned 32-bits value, stored using little-endian convention. -A dictionary is useful to compress short input sequences. -The compressor can take advantage of the dictionary context -to encode the input in a more compact manner. -It works as a kind of “known prefix” which is used by -both the compressor and the decompressor to “warm-up” reference tables. - -The decompressor can use Dict-ID identifier to determine -which dictionary must be used to correctly decode data. -The compressor and the decompressor must use exactly the same dictionary. -It's presumed that the 32-bits dictID uniquely identifies a dictionary. - -Within a single frame, a single dictionary can be defined. -When the frame descriptor defines independent blocks, -each block will be initialized with the same dictionary. -If the frame descriptor defines linked blocks, -the dictionary will only be used once, at the beginning of the frame. - -__Header Checksum__ - -One-byte checksum of combined descriptor fields, including optional ones. -The value is the second byte of `xxh32()` : ` (xxh32()>>8) & 0xFF ` -using zero as a seed, and the full Frame Descriptor as an input -(including optional fields when they are present). -A wrong checksum indicates that the descriptor is erroneous. - - -Data Blocks ------------ - -| Block Size | data | (Block Checksum) | -|:----------:| ------ |:----------------:| -| 4 bytes | | 0 - 4 bytes | - - -__Block Size__ - -This field uses 4-bytes, format is little-endian. 
- -If the highest bit is set (`1`), the block is uncompressed. - -If the highest bit is not set (`0`), the block is LZ4-compressed, -using the [LZ4 block format specification](https://github.com/lz4/lz4/blob/dev/doc/lz4_Block_format.md). - -All other bits give the size, in bytes, of the data section. -The size does not include the block checksum if present. - -_Block_Size_ shall never be larger than _Block_Maximum_Size_. -Such an outcome could potentially happen for non-compressible sources. -In such a case, such data block must be passed using uncompressed format. - -A value of `0x00000000` is invalid, and signifies an _EndMark_ instead. -Note that this is different from a value of `0x80000000` (highest bit set), -which is an uncompressed block of size 0 (empty), -which is valid, and therefore doesn't end a frame. -Note that, if _Block_checksum_ is enabled, -even an empty block must be followed by a 32-bit block checksum. - -__Data__ - -Where the actual data to decode stands. -It might be compressed or not, depending on previous field indications. - -When compressed, the data must respect the [LZ4 block format specification](https://github.com/lz4/lz4/blob/dev/doc/lz4_Block_format.md). - -Note that a block is not necessarily full. -Uncompressed size of data can be any size __up to__ _Block_Maximum_Size_, -so it may contain less data than the maximum block size. - -__Block checksum__ - -Only present if the associated flag is set. -This is a 4-bytes checksum value, in little endian format, -calculated by using the [xxHash-32 algorithm] on the __raw__ (undecoded) data block, -and a seed of zero. -The intention is to detect data corruption (storage or transmission errors) -before decoding. - -_Block_checksum_ can be cumulative with _Content_checksum_. - -[xxHash-32 algorithm]: https://github.com/Cyan4973/xxHash/blob/release/doc/xxhash_spec.md - - -Skippable Frames ----------------- - -| Magic Number | Frame Size | User Data | -|:------------:|:----------:| --------- | -| 4 bytes | 4 bytes | | - -Skippable frames allow the integration of user-defined data -into a flow of concatenated frames. -Its design is pretty straightforward, -with the sole objective to allow the decoder to quickly skip -over user-defined data and continue decoding. - -For the purpose of facilitating identification, -it is discouraged to start a flow of concatenated frames with a skippable frame. -If there is a need to start such a flow with some user data -encapsulated into a skippable frame, -it’s recommended to start with a zero-byte LZ4 frame -followed by a skippable frame. -This will make it easier for file type identifiers. - - -__Magic Number__ - -4 Bytes, Little endian format. -Value : 0x184D2A5X, which means any value from 0x184D2A50 to 0x184D2A5F. -All 16 values are valid to identify a skippable frame. - -__Frame Size__ - -This is the size, in bytes, of the following User Data -(without including the magic number nor the size field itself). -4 Bytes, Little endian format, unsigned 32-bits. -This means User Data can’t be bigger than (2^32-1) Bytes. - -__User Data__ - -User Data can be anything. Data will just be skipped by the decoder. - - -Legacy frame ------------- - -The Legacy frame format was defined into the initial versions of “LZ4Demo”. -Newer compressors should not use this format anymore, as it is too restrictive. - -Main characteristics of the legacy format : - -- Fixed block size : 8 MB. -- All blocks must be completely filled, except the last one. 
-- All blocks are always compressed, even when compression is detrimental. -- The last block is detected either because - it is followed by the “EOF” (End of File) mark, - or because it is followed by a known Frame Magic Number. -- No checksum -- Convention is Little endian - -| MagicNb | B.CSize | CData | B.CSize | CData | (...) | EndMark | -| ------- | ------- | ----- | ------- | ----- | ------- | ------- | -| 4 bytes | 4 bytes | CSize | 4 bytes | CSize | x times | EOF | - - -__Magic Number__ - -4 Bytes, Little endian format. -Value : 0x184C2102 - -__Block Compressed Size__ - -This is the size, in bytes, of the following compressed data block. -4 Bytes, Little endian format. - -__Data__ - -Where the actual compressed data stands. -Data is always compressed, even when compression is detrimental. - -__EndMark__ - -End of legacy frame is implicit only. -It must be followed by a standard EOF (End Of File) signal, -whether it is a file or a stream. - -Alternatively, if the frame is followed by a valid Frame Magic Number, -it is considered completed. -This policy makes it possible to concatenate legacy frames. - -Any other value will be interpreted as a block size, -and trigger an error if it does not fit within acceptable range. - - -Version changes ---------------- - -1.6.2 : clarifies specification of _EndMark_ - -1.6.1 : introduced terms "LZ4 Frame Header" and "LZ4 Frame Footer" - -1.6.0 : restored Dictionary ID field in Frame header - -1.5.1 : changed document format to MarkDown - -1.5 : removed Dictionary ID from specification - -1.4.1 : changed wording from “stream” to “frame” - -1.4 : added skippable streams, re-added stream checksum - -1.3 : modified header checksum - -1.2 : reduced choice of “block size”, to postpone decision on “dynamic size of BlockSize Field”. - -1.1 : optional fields are now part of the descriptor - -1.0 : changed “block size” specification, adding a compressed/uncompressed flag - -0.9 : reduced scale of “block maximum size” table - -0.8 : removed : high compression flag - -0.7 : removed : stream checksum - -0.6 : settled : stream size uses 8 bytes, endian convention is little endian - -0.5: added copyright notice - -0.4 : changed format to Google Doc compatible OpenDocument diff --git a/librocksdb-sys/lz4/doc/lz4_manual.html b/librocksdb-sys/lz4/doc/lz4_manual.html deleted file mode 100644 index 6fafb21..0000000 --- a/librocksdb-sys/lz4/doc/lz4_manual.html +++ /dev/null @@ -1,613 +0,0 @@ - - - -1.9.4 Manual - - -

1.9.4 Manual

-
-

Contents
  1. Introduction
  2. Version
  3. Tuning parameter
  4. Simple Functions
  5. Advanced Functions
  6. Streaming Compression Functions
  7. Streaming Decompression Functions
  8. Experimental section
  9. Private Definitions
  10. Obsolete Functions
-
-

Introduction

-  LZ4 is a lossless compression algorithm, providing compression speed >500 MB/s per core,
-  scalable with multi-core CPUs. It features an extremely fast decoder, with speed in
-  multiple GB/s per core, typically reaching RAM speed limits on multi-core systems.
-
-  The LZ4 compression library provides in-memory compression and decompression functions.
-  It gives full buffer control to the user.
-  Compression can be done in:
-    - a single step (described as Simple Functions)
-    - a single step, reusing a context (described in Advanced Functions)
-    - unbounded multiple steps (described as Streaming compression)
-
-  lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md).
-  Decompressing such a compressed block requires additional metadata.
-  Exact metadata depends on exact decompression function.
-  For the typical case of LZ4_decompress_safe(),
-  metadata includes block's compressed size, and maximum bound of decompressed size.
-  Each application is free to encode and pass such metadata in whichever way it wants.
-
-  lz4.h only handles blocks; it cannot generate Frames.
-
-  Blocks are different from Frames (doc/lz4_Frame_format.md).
-  Frames bundle both blocks and metadata in a specified manner.
-  Embedding metadata is required for compressed data to be self-contained and portable.
-  Frame format is delivered through a companion API, declared in lz4frame.h.
-  The `lz4` CLI can only manage frames.
-
- -
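The single-step usage listed above can be exercised with just three of the functions documented in this manual; the program below is a minimal sketch (error handling trimmed), not an official sample.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "lz4.h"

int main(void)
{
    const char src[] = "LZ4 round trip example: repeated text compresses well. "
                       "LZ4 round trip example: repeated text compresses well.";
    const int srcSize = (int)sizeof(src);

    int bound = LZ4_compressBound(srcSize);        /* worst-case output size */
    char *compressed = malloc((size_t)bound);
    char *restored   = malloc(sizeof(src));
    if (!compressed || !restored) return 1;

    int cSize = LZ4_compress_default(src, compressed, srcSize, bound);
    if (cSize <= 0) return 1;                      /* 0 means compression failed */

    /* The caller must carry srcSize (or an upper bound) out of band. */
    int dSize = LZ4_decompress_safe(compressed, restored, cSize, srcSize);
    if (dSize < 0) return 1;                       /* negative means malformed input */

    printf("%d -> %d -> %d bytes, %s\n", srcSize, cSize, dSize,
           memcmp(src, restored, (size_t)srcSize) == 0 ? "match" : "MISMATCH");
    free(compressed);
    free(restored);
    return 0;
}
```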
#if defined(LZ4_FREESTANDING) && (LZ4_FREESTANDING == 1)
-#  define LZ4_HEAPMODE 0
-#  define LZ4HC_HEAPMODE 0
-#  define LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION 1
-#  if !defined(LZ4_memcpy)
-#    error "LZ4_FREESTANDING requires macro 'LZ4_memcpy'."
-#  endif
-#  if !defined(LZ4_memset)
-#    error "LZ4_FREESTANDING requires macro 'LZ4_memset'."
-#  endif
-#  if !defined(LZ4_memmove)
-#    error "LZ4_FREESTANDING requires macro 'LZ4_memmove'."
-#  endif
-#elif ! defined(LZ4_FREESTANDING)
-#  define LZ4_FREESTANDING 0
-#endif
-

When this macro is set to 1, it enables "freestanding mode" that is - suitable for typical freestanding environment which doesn't support - standard C library. - - - LZ4_FREESTANDING is a compile-time switch. - - It requires the following macros to be defined: - LZ4_memcpy, LZ4_memmove, LZ4_memset. - - It only enables LZ4/HC functions which don't use heap. - All LZ4F_* functions are not supported. - - See tests/freestanding.c to check its basic setup. - -


- -
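A hedged sketch of what the freestanding setup described above can look like: the three replacement routines and their names are hypothetical, and in practice the same definitions must also be visible when lz4.c itself is compiled (tests/freestanding.c is the real reference setup).

```c
#include <stddef.h>

/* Hypothetical bare-metal replacements for the C library routines. */
static void *bare_memcpy(void *d, const void *s, size_t n)
{
    unsigned char *dp = d; const unsigned char *sp = s;
    while (n--) *dp++ = *sp++;
    return d;
}
static void *bare_memset(void *p, int c, size_t n)
{
    unsigned char *bp = p;
    while (n--) *bp++ = (unsigned char)c;
    return p;
}
static void *bare_memmove(void *d, const void *s, size_t n)
{
    unsigned char *dp = d; const unsigned char *sp = s;
    if (dp < sp) { while (n--) *dp++ = *sp++; }
    else { dp += n; sp += n; while (n--) *--dp = *--sp; }
    return d;
}

#define LZ4_FREESTANDING 1
#define LZ4_memcpy(d, s, n)  bare_memcpy((d), (s), (n))
#define LZ4_memmove(d, s, n) bare_memmove((d), (s), (n))
#define LZ4_memset(p, v, n)  bare_memset((p), (v), (n))
#include "lz4.h"   /* only the heap-free LZ4/LZ4HC block APIs are usable */
```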

Version


-
-
int LZ4_versionNumber (void);  /**< library version number; useful to check dll version; requires v1.3.0+ */
-

-
const char* LZ4_versionString (void);   /**< library version string; useful to check dll version; requires v1.7.5+ */
-

-
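A trivial check of the two version functions above, together with the LZ4_VERSION_NUMBER macro from lz4.h, to confirm which library the program actually linked against:

```c
#include <stdio.h>
#include "lz4.h"

int main(void)
{
    /* Runtime values come from the linked library; LZ4_VERSION_NUMBER is the
     * value baked in at compile time from lz4.h, so the two can differ. */
    printf("liblz4 %s (runtime %d, compile-time %d)\n",
           LZ4_versionString(), LZ4_versionNumber(), LZ4_VERSION_NUMBER);
    return 0;
}
```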

Tuning parameter


-
-
#ifndef LZ4_MEMORY_USAGE
-# define LZ4_MEMORY_USAGE LZ4_MEMORY_USAGE_DEFAULT
-#endif
-

Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; ) - Increasing memory usage improves compression ratio, at the cost of speed. - Reduced memory usage may improve speed at the cost of ratio, thanks to better cache locality. - Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache - -


- -
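One hedged way to apply the tuning knob above is a project-wide compile definition, kept identical for lz4.c and for every file that includes lz4.h; the value 18 below is only an example.

```c
/* e.g. build with:  cc -DLZ4_MEMORY_USAGE=18 -c lz4.c my_app.c
 * or, equivalently, define it before the header in every translation unit: */
#define LZ4_MEMORY_USAGE 18   /* 2^18 = 256 KB of internal state instead of the default 16 KB (N = 14) */
#include "lz4.h"
```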

Simple Functions


-
-
int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity);
-

Compresses 'srcSize' bytes from buffer 'src' - into already allocated 'dst' buffer of size 'dstCapacity'. - Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize). - It also runs faster, so it's a recommended setting. - If the function cannot compress 'src' into a more limited 'dst' budget, - compression stops *immediately*, and the function result is zero. - In which case, 'dst' content is undefined (invalid). - srcSize : max supported value is LZ4_MAX_INPUT_SIZE. - dstCapacity : size of buffer 'dst' (which must be already allocated) - @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity) - or 0 if compression fails - Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor read outside 'source' buffer). - -


- -
int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity);
-

compressedSize : is the exact complete size of the compressed block. - dstCapacity : is the size of destination buffer (which must be already allocated), presumed an upper bound of decompressed size. - @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity) - If destination buffer is not large enough, decoding will stop and output an error code (negative value). - If the source stream is detected malformed, the function will stop decoding and return a negative result. - Note 1 : This function is protected against malicious data packets : - it will never write outside 'dst' buffer, nor read outside 'source' buffer, - even if the compressed block is maliciously modified to order the decoder to do these actions. - In such case, the decoder stops immediately, and considers the compressed block malformed. - Note 2 : compressedSize and dstCapacity must be provided to the function, the compressed block does not contain them. - The implementation is free to send / store / derive this information in whichever way is most beneficial. - If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead. -


- -

Advanced Functions


-
-
int LZ4_compressBound(int inputSize);
-

Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible) - This function is primarily useful for memory allocation purposes (destination buffer size). - Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example). - Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize) - inputSize : max supported value is LZ4_MAX_INPUT_SIZE - return : maximum output size in a "worst case" scenario - or 0, if input size is incorrect (too large or negative) -


- -
int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
-

Same as LZ4_compress_default(), but allows selection of an "acceleration" factor.
 The larger the acceleration value, the faster the algorithm, but also the lower the compression ratio. It's a trade-off, and it can be fine-tuned : each successive value provides roughly +~3% speed.
 An acceleration value of "1" is the same as regular LZ4_compress_default().
 Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c).
 Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c).


- -
int LZ4_sizeofState(void);
-int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
-

Same as LZ4_compress_fast(), using an externally allocated memory space for its state.
 Use LZ4_sizeofState() to know how much memory must be allocated, and allocate it on 8-byte boundaries (using `malloc()` typically).
 Then, provide this buffer as `void* state` to the compression function.
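A minimal sketch under those assumptions (a malloc()-returned pointer is taken to satisfy the 8-byte alignment requirement; the wrapper name is illustrative):

```c
/* Minimal sketch: compression with an externally allocated state. */
#include <stdlib.h>
#include "lz4.h"

int compress_with_ext_state(const char* src, int srcSize,
                            char* dst, int dstCapacity)
{
    void* const state = malloc((size_t)LZ4_sizeofState());
    int cmpSize = 0;
    if (state != NULL) {
        /* acceleration == 1 behaves like LZ4_compress_default() */
        cmpSize = LZ4_compress_fast_extState(state, src, dst, srcSize, dstCapacity, 1);
        free(state);
    }
    return cmpSize;   /* 0 means compression (or allocation) failed */
}
```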


- -
int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize);
-

Reverse the logic : compresses as much data as possible from 'src' into the already allocated buffer 'dst', of size >= 'targetDstSize'.
 This function either compresses the entire 'src' content into 'dst' if it's large enough, or fills 'dst' completely with as much data as possible from 'src'.
 Note : the acceleration parameter is fixed to "default".
 *srcSizePtr : will be modified to indicate how many bytes were read from 'src' to fill 'dst'. The new value is necessarily <= the input value.
 @return : number of bytes written into 'dst' (necessarily <= targetDstSize), or 0 if compression fails.
 Note : from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+) : the produced compressed content could, in specific circumstances, need to be decompressed into a destination buffer larger by at least 1 byte than the content to decompress. If an application uses `LZ4_compress_destSize()`, it's highly recommended to update liblz4 to v1.9.2 or better. If this can't be done or ensured, the receiving decompression function should provide a dstCapacity which is > decompressedSize, by at least 1 byte. See https://github.com/lz4/lz4/issues/859 for details.
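A minimal sketch of the "fill a fixed-size packet" use case this function targets (the packet framing and function name are illustrative):

```c
/* Minimal sketch: fill a fixed-size destination with as much of 'src' as fits. */
#include <stdio.h>
#include "lz4.h"

void fill_fixed_packet(const char* src, int srcSize, char* packet, int packetSize)
{
    int consumed = srcSize;   /* in: available input; out: bytes actually read from 'src' */
    const int written = LZ4_compress_destSize(src, packet, &consumed, packetSize);
    if (written == 0) {
        /* compression failed; nothing usable in 'packet' */
        return;
    }
    printf("consumed %d input bytes, wrote %d compressed bytes\n", consumed, written);
}
```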


- -
int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity);
-

Decompress an LZ4 compressed block, of size 'srcSize' at position 'src', - into destination buffer 'dst' of size 'dstCapacity'. - Up to 'targetOutputSize' bytes will be decoded. - The function stops decoding on reaching this objective. - This can be useful to boost performance - whenever only the beginning of a block is required. - - @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize) - If source stream is detected malformed, function returns a negative result. - - Note 1 : @return can be < targetOutputSize, if compressed block contains less data. - - Note 2 : targetOutputSize must be <= dstCapacity - - Note 3 : this function effectively stops decoding on reaching targetOutputSize, - so dstCapacity is kind of redundant. - This is because in older versions of this function, - decoding operation would still write complete sequences. - Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize, - it could write more bytes, though only up to dstCapacity. - Some "margin" used to be required for this operation to work properly. - Thankfully, this is no longer necessary. - The function nonetheless keeps the same signature, in an effort to preserve API compatibility. - - Note 4 : If srcSize is the exact size of the block, - then targetOutputSize can be any value, - including larger than the block's decompressed size. - The function will, at most, generate block's decompressed size. - - Note 5 : If srcSize is _larger_ than block's compressed size, - then targetOutputSize **MUST** be <= block's decompressed size. - Otherwise, *silent corruption will occur*. - -
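For illustration, a minimal sketch of partial decoding, e.g. to inspect a small prefix stored at the beginning of a block (the helper name and the idea of a "peek" are assumptions, not part of the API):

```c
/* Minimal sketch: decode only the first 'want' bytes of a compressed block. */
#include "lz4.h"

int peek_block_prefix(const char* cmp, int cmpSize,
                      char* out, int want, int outCapacity)
{
    /* targetOutputSize must be <= dstCapacity */
    if (want > outCapacity) want = outCapacity;
    return LZ4_decompress_safe_partial(cmp, out, cmpSize, want, outCapacity);
}
```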


- -

Streaming Compression Functions


-
-
void LZ4_resetStream_fast (LZ4_stream_t* streamPtr);
-

Use this to prepare an LZ4_stream_t for a new chain of dependent blocks - (e.g., LZ4_compress_fast_continue()). - - An LZ4_stream_t must be initialized once before usage. - This is automatically done when created by LZ4_createStream(). - However, should the LZ4_stream_t be simply declared on stack (for example), - it's necessary to initialize it first, using LZ4_initStream(). - - After init, start any new stream with LZ4_resetStream_fast(). - A same LZ4_stream_t can be re-used multiple times consecutively - and compress multiple streams, - provided that it starts each new stream with LZ4_resetStream_fast(). - - LZ4_resetStream_fast() is much faster than LZ4_initStream(), - but is not compatible with memory regions containing garbage data. - - Note: it's only useful to call LZ4_resetStream_fast() - in the context of streaming compression. - The *extState* functions perform their own resets. - Invoking LZ4_resetStream_fast() before is redundant, and even counterproductive. - -


- -
int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize);
-

Use this function to reference a static dictionary into LZ4_stream_t.
 The dictionary must remain available during compression.
 LZ4_loadDict() triggers a reset, so any previous data will be forgotten.
 The same dictionary will have to be loaded on the decompression side for successful decoding.
 Dictionaries are useful for better compression of small data (KB range).
 While LZ4 accepts any input as dictionary, results are generally better when using Zstandard's Dictionary Builder.
 Loading a size of 0 is allowed, and is the same as a reset.
 @return : loaded dictionary size, in bytes (necessarily <= 64 KB)


- -
int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
-

Compress 'src' content using data from previously compressed blocks, for better compression ratio.
 'dst' buffer must be already allocated.
 If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
 @return : size of compressed block, or 0 if there is an error (typically, cannot fit into 'dst').
 Note 1 : each invocation of LZ4_compress_fast_continue() generates a new block. Each block has precise boundaries, and must be decompressed separately, calling LZ4_decompress_*() with the relevant metadata. It's not possible to append blocks together and expect a single invocation of LZ4_decompress_*() to decompress them all at once.
 Note 2 : the previous 64KB of source data is __assumed__ to remain present, unmodified, at the same address in memory!
 Note 3 : when input is structured as a double buffer, each buffer can have any size, including < 64 KB. Make sure that the buffers are separated by at least one byte. This construction ensures that each block only depends on the previous block.
 Note 4 : if the input buffer is a ring buffer, it can have any size, including < 64 KB.
 Note 5 : after an error, the stream status is undefined (invalid); it can only be reset or freed.
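A condensed double-buffer sketch, modeled on the blockStreaming_doubleBuffer.c example deleted further down in this diff. read_block() and write_block() are hypothetical application I/O helpers (write_block() is assumed to also record the block size, as the example programs do):

```c
/* Condensed sketch of double-buffer streaming compression. */
#include "lz4.h"

#define BLOCK_BYTES (8 * 1024)

extern int  read_block(char* buf, int maxBytes);       /* hypothetical */
extern void write_block(const char* buf, int bytes);   /* hypothetical */

void stream_compress(void)
{
    LZ4_stream_t* const stream = LZ4_createStream();
    char inp[2][BLOCK_BYTES];          /* double buffer: previous block stays addressable */
    int idx = 0;

    for (;;) {
        char* const src = inp[idx];
        const int srcSize = read_block(src, BLOCK_BYTES);
        if (srcSize <= 0) break;

        {   char cmp[LZ4_COMPRESSBOUND(BLOCK_BYTES)];
            const int cmpSize = LZ4_compress_fast_continue(stream, src, cmp,
                                                           srcSize, (int)sizeof(cmp), 1);
            if (cmpSize <= 0) break;
            write_block(cmp, cmpSize);
        }
        idx = (idx + 1) % 2;           /* keep the previously compressed block valid */
    }
    LZ4_freeStream(stream);
}
```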


- -
int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int maxDictSize);
-

If the last 64KB of data cannot be guaranteed to remain available at its current memory location, save it into a safer place (char* safeBuffer).
 This is schematically equivalent to a memcpy() followed by LZ4_loadDict(), but is much faster, because LZ4_saveDict() doesn't need to rebuild tables.
 @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error.


- -

Streaming Decompression Functions

  Bufferless synchronous API
-
- -
#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
-LZ4_streamDecode_t* LZ4_createStreamDecode(void);
-int                 LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream);
-#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */
-

Creation / destruction of a streaming decompression tracking context.
 A tracking context can be re-used multiple times.


- -
int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize);
-

An LZ4_streamDecode_t context can be allocated once and re-used multiple times. - Use this function to start decompression of a new stream of blocks. - A dictionary can optionally be set. Use NULL or size 0 for a reset order. - Dictionary is presumed stable : it must remain accessible and unmodified during next decompression. - @return : 1 if OK, 0 if error - -


- -
int LZ4_decoderRingBufferSize(int maxBlockSize);
-#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) (65536 + 14 + (maxBlockSize))  /* for static allocation; maxBlockSize presumed valid */
-

Note : in a ring buffer scenario (optional), blocks are presumed decompressed next to each other, up to the moment there is not enough remaining space for the next block (remainingSize < maxBlockSize), at which point decoding resumes from the beginning of the ring buffer.
 When setting up such a ring buffer for streaming decompression, this function provides the minimum size of the ring buffer required to be compatible with any source respecting the maxBlockSize condition.
 @return : minimum ring buffer size, or 0 if there is an error (invalid maxBlockSize).


- -
int
-LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode,
-                        const char* src, char* dst,
-                        int srcSize, int dstCapacity);
-

These decoding functions allow decompression of consecutive blocks in "streaming" mode.
 A block is an unsplittable entity; it must be presented entirely to a decompression function. Decompression functions only accept one block at a time.
 The last 64KB of previously decoded data *must* remain available and unmodified at the memory position where it was decoded. If less than 64KB of data has been decoded, all of it must be present.
 Special : if the decompression side uses a ring buffer, it must respect one of the following conditions :
 - The decompression buffer size is _at least_ LZ4_decoderRingBufferSize(maxBlockSize). maxBlockSize is the maximum size of any single block; it can have any value > 16 bytes. In this case, encoding and decoding buffers do not need to be synchronized. In fact, data can be produced by any source compliant with the LZ4 format specification which respects maxBlockSize.
 - Synchronized mode : the decompression buffer size is _exactly_ the same as the compression buffer size, follows exactly the same update rule (block boundaries at the same positions), and the decoding function is provided with the exact decompressed size of each block (except for the last block of the stream). In that case, decoding and encoding ring buffers can have any size, including small ones (< 64 KB).
 - The decompression buffer is larger than the encoding buffer by at least maxBlockSize more bytes. In this case, encoding and decoding buffers do not need to be synchronized, and the encoding ring buffer can have any size, including small ones (< 64 KB).
 Whenever these conditions cannot be met, save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression, then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing the next block.
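A condensed sketch of the matching double-buffer decompression, mirroring the compression sketch above. read_compressed_block() is a hypothetical helper that reads one length-prefixed block (as the example programs do); write_block() is hypothetical output:

```c
/* Condensed sketch of double-buffer streaming decompression. */
#include "lz4.h"

#define BLOCK_BYTES (8 * 1024)

extern int  read_compressed_block(char* buf, int maxBytes);   /* hypothetical */
extern void write_block(const char* buf, int bytes);          /* hypothetical */

void stream_decompress(void)
{
    LZ4_streamDecode_t* const sd = LZ4_createStreamDecode();
    char dec[2][BLOCK_BYTES];          /* previous decoded block stays available */
    int idx = 0;

    LZ4_setStreamDecode(sd, NULL, 0);  /* start a new stream, no dictionary */

    for (;;) {
        char cmp[LZ4_COMPRESSBOUND(BLOCK_BYTES)];
        const int cmpSize = read_compressed_block(cmp, (int)sizeof(cmp));
        if (cmpSize <= 0) break;

        {   const int decSize = LZ4_decompress_safe_continue(sd, cmp, dec[idx],
                                                             cmpSize, BLOCK_BYTES);
            if (decSize <= 0) break;   /* negative means malformed input */
            write_block(dec[idx], decSize);
        }
        idx = (idx + 1) % 2;
    }
    LZ4_freeStreamDecode(sd);
}
```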


- -
int
-LZ4_decompress_safe_usingDict(const char* src, char* dst,
-                              int srcSize, int dstCapacity,
-                              const char* dictStart, int dictSize);
-

These decoding functions work the same as - a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue() - They are stand-alone, and don't need an LZ4_streamDecode_t structure. - Dictionary is presumed stable : it must remain accessible and unmodified during decompression. - Performance tip : Decompression speed can be substantially increased - when dst == dictStart + dictSize. - -
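A minimal sketch of the stand-alone dictionary decode (the wrapper name is illustrative; 'dict' must be the same dictionary loaded with LZ4_loadDict() on the compression side):

```c
/* Minimal sketch: stand-alone decompression with an external dictionary. */
#include "lz4.h"

int decompress_with_dict(const char* cmp, int cmpSize,
                         char* out, int outCapacity,
                         const char* dict, int dictSize)
{
    /* Tip from the manual: decoding is faster when out == dict + dictSize. */
    return LZ4_decompress_safe_usingDict(cmp, out, cmpSize, outCapacity,
                                         dict, dictSize);
}
```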


- -

Experimental section

- Symbols declared in this section must be considered unstable. Their
- signatures or semantics may change, or they may be removed altogether in the
- future. They are therefore only safe to depend on when the caller is
- statically linked against the library.
-
- To protect against unsafe usage, not only are the declarations guarded,
- the definitions are hidden by default
- when building LZ4 as a shared/dynamic library.
-
- In order to access these declarations,
- define LZ4_STATIC_LINKING_ONLY in your application
- before including LZ4's headers.
-
- In order to make their implementations accessible dynamically, you must
- define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library.
-
- -
LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
-

A variant of LZ4_compress_fast_extState(). - - Using this variant avoids an expensive initialization step. - It is only safe to call if the state buffer is known to be correctly initialized already - (see above comment on LZ4_resetStream_fast() for a definition of "correctly initialized"). - From a high level, the difference is that - this function initializes the provided state with a call to something like LZ4_resetStream_fast() - while LZ4_compress_fast_extState() starts with a call to LZ4_resetStream(). - -


- -
LZ4LIB_STATIC_API void
-LZ4_attach_dictionary(LZ4_stream_t* workingStream,
-                const LZ4_stream_t* dictionaryStream);
-

This is an experimental API that allows - efficient use of a static dictionary many times. - - Rather than re-loading the dictionary buffer into a working context before - each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a - working LZ4_stream_t, this function introduces a no-copy setup mechanism, - in which the working stream references the dictionary stream in-place. - - Several assumptions are made about the state of the dictionary stream. - Currently, only streams which have been prepared by LZ4_loadDict() should - be expected to work. - - Alternatively, the provided dictionaryStream may be NULL, - in which case any existing dictionary stream is unset. - - If a dictionary is provided, it replaces any pre-existing stream history. - The dictionary contents are the only history that can be referenced and - logically immediately precede the data compressed in the first subsequent - compression call. - - The dictionary will only remain attached to the working stream through the - first compression call, at the end of which it is cleared. The dictionary - stream (and source buffer) must remain in-place / accessible / unchanged - through the completion of the first compression call on the stream. - -


- -

It's possible to have input and output share the same buffer, for highly constrained memory environments.
 In both cases, it requires the input to lie at the end of the buffer, and decompression to start at the beginning of the buffer.
 The buffer size must feature some margin, hence be larger than the final size.

 |<------------------------buffer--------------------------------->|
                        |<-----------compressed data--------->|
 |<-----------decompressed size------------------>|
                                                  |<----margin---->|

 This technique is more useful for decompression, since the decompressed size is typically larger and the margin is short.
 In-place decompression will work inside any buffer whose size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize). This presumes that decompressedSize > compressedSize. Otherwise, compression actually expanded the data, and it would be more efficient to store such data with a flag indicating it's not compressed. This can happen when data is not compressible (already compressed, or encrypted).

 For in-place compression, the margin is larger, as it must be able to cope with both history preservation (requiring input data to remain unmodified up to LZ4_DISTANCE_MAX) and data expansion (which can happen when input is not compressible). As a consequence, buffer size requirements are much higher, and the memory savings offered by in-place compression are more limited.

 There are ways to limit this cost for compression :
 - Reduce history size, by modifying LZ4_DISTANCE_MAX. Note that it is a compile-time constant, so all compressions will apply this limit. Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX, so it's a reasonable trick when inputs are known to be small.
 - Require the compressor to deliver a "maximum compressed size". This is the `dstCapacity` parameter in `LZ4_compress*()`. When this size is < LZ4_COMPRESSBOUND(inputSize), compression can fail, in which case the return code will be 0 (zero). The caller must be ready for these cases to happen, and typically design a backup scheme to send the data uncompressed.
 The combination of both techniques can significantly reduce the amount of margin required for in-place compression.

 In-place compression can work in any buffer whose size is >= (maxCompressedSize), with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success. LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX, so it's possible to reduce memory requirements by playing with them.
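A minimal sketch of in-place decompression under the assumptions above (static linking, and compressedSize <= decompressedSize). The allocation strategy and helper name are illustrative, not part of the library:

```c
/* Minimal sketch: in-place decompression.
 * Assumes LZ4_STATIC_LINKING_ONLY (the in-place macros live in that section)
 * and that compressedSize <= decompressedSize. */
#define LZ4_STATIC_LINKING_ONLY
#include <stdlib.h>
#include <string.h>
#include "lz4.h"

/* On success, *result holds a malloc'ed buffer whose first decompressedSize
 * bytes are the decoded data. Returns decompressedSize, or -1 on error. */
int decompress_in_place(const char* cmp, int cmpSize, int decompressedSize,
                        char** result)
{
    const size_t bufSize = LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize);
    char* const buf = (char*)malloc(bufSize);
    if (buf == NULL) return -1;

    /* input must sit at the very end of the buffer */
    memcpy(buf + bufSize - (size_t)cmpSize, cmp, (size_t)cmpSize);

    {   const int decSize = LZ4_decompress_safe(buf + bufSize - (size_t)cmpSize,
                                                buf, cmpSize, decompressedSize);
        if (decSize != decompressedSize) { free(buf); return -1; }
    }
    *result = buf;
    return decompressedSize;
}
```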


- -
#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize)   ((decompressedSize) + LZ4_DECOMPRESS_INPLACE_MARGIN(decompressedSize))  /**< note: presumes that compressedSize < decompressedSize. note2: margin is overestimated a bit, since it could use compressedSize instead */
-

-
#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize)   ((maxCompressedSize) + LZ4_COMPRESS_INPLACE_MARGIN)  /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */
-

-

Private Definitions

- Do not use these definitions directly.
- They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`.
- Accessing members will expose user code to API and/or ABI break in future versions of the library.
-
- -

Never use the internal definitions below directly! These definitions are not API/ABI safe and may change in future versions. If you need static allocation, declare or allocate an LZ4_stream_t object.


- -
LZ4_stream_t* LZ4_initStream (void* buffer, size_t size);
-

An LZ4_stream_t structure must be initialized at least once.
 This is automatically done when invoking LZ4_createStream(), but not when the structure is simply declared on the stack (for example).
 Use LZ4_initStream() to properly initialize a newly declared LZ4_stream_t.
 It can also initialize any arbitrary buffer of sufficient size, and will @return a pointer of proper type upon initialization.
 Note : initialization fails if the size and alignment conditions are not respected, in which case the function will @return NULL.
 Note 2 : an LZ4_stream_t structure guarantees correct alignment and size.
 Note 3 : before v1.9.0, use LZ4_resetStream() instead.
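A minimal sketch of the stack-allocation case this note is about (the wrapper and buffer names are illustrative):

```c
/* Minimal sketch: stack-allocated LZ4_stream_t, initialized with LZ4_initStream(). */
#include "lz4.h"

void compress_two_blocks(const char* a, int aSize, const char* b, int bSize,
                         char* dst, int dstCapacity)
{
    LZ4_stream_t stream;
    if (LZ4_initStream(&stream, sizeof(stream)) == NULL) return;   /* size/alignment problem */

    {   const int n1 = LZ4_compress_fast_continue(&stream, a, dst, aSize, dstCapacity, 1);
        if (n1 <= 0) return;
        /* the second block may reference data from the first one */
        LZ4_compress_fast_continue(&stream, b, dst + n1, bSize, dstCapacity - n1, 1);
    }
}
```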


- -
typedef struct {
-    const LZ4_byte* externalDict;
-    const LZ4_byte* prefixEnd;
-    size_t extDictSize;
-    size_t prefixSize;
-} LZ4_streamDecode_t_internal;
-

Never use the internal definitions below directly! These definitions are not API/ABI safe and may change in future versions. If you need static allocation, declare or allocate an LZ4_streamDecode_t object.


- -

Obsolete Functions


-
-
#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS
-#  define LZ4_DEPRECATED(message)   /* disable deprecation warnings */
-#else
-#  if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
-#    define LZ4_DEPRECATED(message) [[deprecated(message)]]
-#  elif defined(_MSC_VER)
-#    define LZ4_DEPRECATED(message) __declspec(deprecated(message))
-#  elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45))
-#    define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
-#  elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31)
-#    define LZ4_DEPRECATED(message) __attribute__((deprecated))
-#  else
-#    pragma message("WARNING: LZ4_DEPRECATED needs custom implementation for this compiler")
-#    define LZ4_DEPRECATED(message)   /* disabled */
-#  endif
-#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */
-

- Deprecated functions make the compiler generate a warning when invoked. - This is meant to invite users to update their source code. - Should deprecation warnings be a problem, it is generally possible to disable them, - typically with -Wno-deprecated-declarations for gcc - or _CRT_SECURE_NO_WARNINGS in Visual. - - Another method is to define LZ4_DISABLE_DEPRECATE_WARNINGS - before including the header file. - -


- -
LZ4_DEPRECATED("use LZ4_compress_default() instead")       LZ4LIB_API int LZ4_compress               (const char* src, char* dest, int srcSize);
-LZ4_DEPRECATED("use LZ4_compress_default() instead")       LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize);
-LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState               (void* state, const char* source, char* dest, int inputSize);
-LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
-LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue                (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize);
-LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue  (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
-


- -
LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize);
-LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);
-


- -
LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize);
-LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize);
-


- -
LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe() instead")
-int LZ4_decompress_fast (const char* src, char* dst, int originalSize);
-LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_continue() instead")
-int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize);
-LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_usingDict() instead")
-int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize);
-

These functions used to be faster than LZ4_decompress_safe(), but this is no longer the case. They are now slower.
 This is because LZ4_decompress_fast() doesn't know the input size, and therefore must progress more cautiously into the input buffer to avoid reading beyond the end of the block.
 On top of that, `LZ4_decompress_fast()` is not protected against malformed or malicious inputs, making it a security liability. As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated.
 The last remaining LZ4_decompress_fast() specificity is that it can decompress a block without knowing its compressed size. Such functionality can be achieved in a more secure manner with LZ4_decompress_safe_partial().
 Parameters :
 originalSize : the uncompressed size to regenerate. `dst` must be already allocated, and its size must be >= 'originalSize' bytes.
 @return : number of bytes read from the source buffer (== compressed size). The function expects to finish at the block's end exactly. If the source stream is detected malformed, the function stops decoding and returns a negative result.
 Note : LZ4_decompress_fast*() requires originalSize. Thanks to this information, it never writes past the output buffer. However, since it doesn't know its 'src' size, it may read an unknown amount of input, past input buffer bounds. Also, since match offsets are not validated, match reads from 'src' may underflow too. These issues never happen if the input (compressed) data is correct. But they may happen if the input data is invalid (error or intentional tampering). As a consequence, use these functions in trusted environments with trusted data **only**.


- -
void LZ4_resetStream (LZ4_stream_t* streamPtr);
-

An LZ4_stream_t structure must be initialized at least once.
 This is done with LZ4_initStream() or LZ4_resetStream().
 Consider switching to LZ4_initStream(); invoking LZ4_resetStream() will trigger deprecation warnings in the future.


- - - diff --git a/librocksdb-sys/lz4/doc/lz4frame_manual.html b/librocksdb-sys/lz4/doc/lz4frame_manual.html deleted file mode 100644 index cfb437e..0000000 --- a/librocksdb-sys/lz4/doc/lz4frame_manual.html +++ /dev/null @@ -1,455 +0,0 @@ - - - -1.9.4 Manual - - -

1.9.4 Manual

-
-

Contents

-
    -
  1. Introduction
  2. -
  3. Compiler specifics
  4. -
  5. Error management
  6. -
  7. Frame compression types
  8. -
  9. Simple compression function
  10. -
  11. Advanced compression functions
  12. -
  13. Resource Management
  14. -
  15. Compression
  16. -
  17. Decompression functions
  18. -
  19. Streaming decompression functions
  20. -
  21. Bulk processing dictionary API
  22. -
-
-

Introduction

- lz4frame.h implements LZ4 frame specification: see doc/lz4_Frame_format.md .
- LZ4 Frames are compatible with `lz4` CLI,
- and designed to be interoperable with any system.
-
- -

Compiler specifics


-
-

Error management


-
-
unsigned    LZ4F_isError(LZ4F_errorCode_t code);   /**< tells when a function result is an error code */
-

-
const char* LZ4F_getErrorName(LZ4F_errorCode_t code);   /**< return error code string; for debugging */
-

-

Frame compression types

 
-
- -
typedef enum {
-    LZ4F_default=0,
-    LZ4F_max64KB=4,
-    LZ4F_max256KB=5,
-    LZ4F_max1MB=6,
-    LZ4F_max4MB=7
-    LZ4F_OBSOLETE_ENUM(max64KB)
-    LZ4F_OBSOLETE_ENUM(max256KB)
-    LZ4F_OBSOLETE_ENUM(max1MB)
-    LZ4F_OBSOLETE_ENUM(max4MB)
-} LZ4F_blockSizeID_t;
-

-
typedef enum {
-    LZ4F_blockLinked=0,
-    LZ4F_blockIndependent
-    LZ4F_OBSOLETE_ENUM(blockLinked)
-    LZ4F_OBSOLETE_ENUM(blockIndependent)
-} LZ4F_blockMode_t;
-

-
typedef enum {
-    LZ4F_noContentChecksum=0,
-    LZ4F_contentChecksumEnabled
-    LZ4F_OBSOLETE_ENUM(noContentChecksum)
-    LZ4F_OBSOLETE_ENUM(contentChecksumEnabled)
-} LZ4F_contentChecksum_t;
-

-
typedef enum {
-    LZ4F_noBlockChecksum=0,
-    LZ4F_blockChecksumEnabled
-} LZ4F_blockChecksum_t;
-

-
typedef enum {
-    LZ4F_frame=0,
-    LZ4F_skippableFrame
-    LZ4F_OBSOLETE_ENUM(skippableFrame)
-} LZ4F_frameType_t;
-

-
typedef struct {
-  LZ4F_blockSizeID_t     blockSizeID;         /* max64KB, max256KB, max1MB, max4MB; 0 == default */
-  LZ4F_blockMode_t       blockMode;           /* LZ4F_blockLinked, LZ4F_blockIndependent; 0 == default */
-  LZ4F_contentChecksum_t contentChecksumFlag; /* 1: frame terminated with 32-bit checksum of decompressed data; 0: disabled (default) */
-  LZ4F_frameType_t       frameType;           /* read-only field : LZ4F_frame or LZ4F_skippableFrame */
-  unsigned long long     contentSize;         /* Size of uncompressed content ; 0 == unknown */
-  unsigned               dictID;              /* Dictionary ID, sent by compressor to help decoder select correct dictionary; 0 == no dictID provided */
-  LZ4F_blockChecksum_t   blockChecksumFlag;   /* 1: each block followed by a checksum of block's compressed data; 0: disabled (default) */
-} LZ4F_frameInfo_t;
-

Makes it possible to set or read frame parameters.
 The structure must first be initialized to 0, using memset() or LZ4F_INIT_FRAMEINFO, which sets all parameters to default.
 It's then possible to update selected parameters.


- -
typedef struct {
-  LZ4F_frameInfo_t frameInfo;
-  int      compressionLevel;    /* 0: default (fast mode); values > LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values < 0 trigger "fast acceleration" */
-  unsigned autoFlush;           /* 1: always flush; reduces usage of internal buffers */
-  unsigned favorDecSpeed;       /* 1: parser favors decompression speed vs compression ratio. Only works for high compression modes (>= LZ4HC_CLEVEL_OPT_MIN) */  /* v1.8.2+ */
-  unsigned reserved[3];         /* must be zero for forward compatibility */
-} LZ4F_preferences_t;
-

Makes it possible to supply advanced compression instructions to the streaming interface.
 The structure must first be initialized to 0, using memset() or LZ4F_INIT_PREFERENCES, which sets all parameters to default.
 All reserved fields must be set to zero.


- -

Simple compression function


-
-
size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr);
-

Returns the maximum possible compressed size with LZ4F_compressFrame() given srcSize and preferences. - `preferencesPtr` is optional. It can be replaced by NULL, in which case, the function will assume default preferences. - Note : this result is only usable with LZ4F_compressFrame(). - It may also be relevant to LZ4F_compressUpdate() _only if_ no flush() operation is ever performed. - -


- -
size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
-                                const void* srcBuffer, size_t srcSize,
-                                const LZ4F_preferences_t* preferencesPtr);
-

Compress an entire srcBuffer into a valid LZ4 frame. - dstCapacity MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). - The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default. - @return : number of bytes written into dstBuffer. - or an error code if it fails (can be tested using LZ4F_isError()) - -
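For illustration, a minimal sketch of whole-buffer frame compression with default preferences (the wrapper name and malloc-based buffer handling are illustrative):

```c
/* Minimal sketch: compress a whole buffer into one LZ4 frame. */
#include <stdlib.h>
#include "lz4frame.h"

/* On success, *outBuf holds a malloc'ed frame; returns its size, or 0 on error. */
size_t compress_to_frame(const void* src, size_t srcSize, void** outBuf)
{
    const size_t bound = LZ4F_compressFrameBound(srcSize, NULL);
    void* const dst = malloc(bound);
    size_t frameSize;
    if (dst == NULL) return 0;

    frameSize = LZ4F_compressFrame(dst, bound, src, srcSize, NULL);  /* NULL = default prefs */
    if (LZ4F_isError(frameSize)) { free(dst); return 0; }

    *outBuf = dst;
    return frameSize;
}
```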


- -

Advanced compression functions


-
-
typedef struct {
-  unsigned stableSrc;    /* 1 == src content will remain present on future calls to LZ4F_compress(); skip copying src content within tmp buffer */
-  unsigned reserved[3];
-} LZ4F_compressOptions_t;
-

-

Resource Management


-
-
LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** cctxPtr, unsigned version);
-LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctx);
-

The first thing to do is to create a compressionContext object, - which will keep track of operation state during streaming compression. - This is achieved using LZ4F_createCompressionContext(), which takes as argument a version, - and a pointer to LZ4F_cctx*, to write the resulting pointer into. - @version provided MUST be LZ4F_VERSION. It is intended to track potential version mismatch, notably when using DLL. - The function provides a pointer to a fully allocated LZ4F_cctx object. - @cctxPtr MUST be != NULL. - If @return != zero, context creation failed. - A created compression context can be employed multiple times for consecutive streaming operations. - Once all streaming compression jobs are completed, - the state object can be released using LZ4F_freeCompressionContext(). - Note1 : LZ4F_freeCompressionContext() is always successful. Its return value can be ignored. - Note2 : LZ4F_freeCompressionContext() works fine with NULL input pointers (do nothing). -


- -

Compression


-
-
size_t LZ4F_compressBegin(LZ4F_cctx* cctx,
-                                      void* dstBuffer, size_t dstCapacity,
-                                      const LZ4F_preferences_t* prefsPtr);
-

will write the frame header into dstBuffer. - dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. - `prefsPtr` is optional : you can provide NULL as argument, all preferences will then be set to default. - @return : number of bytes written into dstBuffer for the header - or an error code (which can be tested using LZ4F_isError()) - -


- -
size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* prefsPtr);
-

Provides minimum dstCapacity required to guarantee success of - LZ4F_compressUpdate(), given a srcSize and preferences, for a worst case scenario. - When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() instead. - Note that the result is only valid for a single invocation of LZ4F_compressUpdate(). - When invoking LZ4F_compressUpdate() multiple times, - if the output buffer is gradually filled up instead of emptied and re-used from its start, - one must check if there is enough remaining capacity before each invocation, using LZ4F_compressBound(). - @return is always the same for a srcSize and prefsPtr. - prefsPtr is optional : when NULL is provided, preferences will be set to cover worst case scenario. - tech details : - @return if automatic flushing is not enabled, includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes. - It also includes frame footer (ending + checksum), since it might be generated by LZ4F_compressEnd(). - @return doesn't include frame header, as it was already generated by LZ4F_compressBegin(). - -


- -
size_t LZ4F_compressUpdate(LZ4F_cctx* cctx,
-                                       void* dstBuffer, size_t dstCapacity,
-                                 const void* srcBuffer, size_t srcSize,
-                                 const LZ4F_compressOptions_t* cOptPtr);
-

LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
 Important rule : dstCapacity MUST be large enough to ensure operation success even in worst-case situations. This value is provided by LZ4F_compressBound(). If this condition is not respected, LZ4F_compressUpdate() will fail (the result is an error code).
 After an error, the state is left in an undefined state, and must be re-initialized or freed.
 If an uncompressed block was written previously, buffered data is flushed before compressed data is appended.
 `cOptPtr` is optional : NULL can be provided, in which case all options are set to default.
 @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered), or an error code if it fails (which can be tested using LZ4F_isError()).


- -
size_t LZ4F_flush(LZ4F_cctx* cctx,
-                              void* dstBuffer, size_t dstCapacity,
-                        const LZ4F_compressOptions_t* cOptPtr);
-

When data must be generated and sent immediately, without waiting for a block to be completely filled, it's possible to call LZ4F_flush(). It will immediately compress any data buffered within cctx.
 `dstCapacity` must be large enough to ensure the operation will be successful.
 `cOptPtr` is optional : it's possible to provide NULL, in which case all options are set to default.
 @return : number of bytes written into dstBuffer (can be zero, when there is no data stored within cctx), or an error code if it fails (which can be tested using LZ4F_isError()).
 Note : LZ4F_flush() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr).


- -
size_t LZ4F_compressEnd(LZ4F_cctx* cctx,
-                                    void* dstBuffer, size_t dstCapacity,
-                              const LZ4F_compressOptions_t* cOptPtr);
-

To properly finish an LZ4 frame, invoke LZ4F_compressEnd().
 It will flush whatever data remained within `cctx` (like LZ4F_flush()) and properly finalize the frame, with an endMark and a checksum.
 `cOptPtr` is optional : NULL can be provided, in which case all options will be set to default.
 @return : number of bytes written into dstBuffer, necessarily >= 4 (endMark), or an error code if it fails (which can be tested using LZ4F_isError()).
 Note : LZ4F_compressEnd() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr).
 A successful call to LZ4F_compressEnd() makes `cctx` available again for another compression task.
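A condensed sketch of the full begin / update / end sequence described above. read_chunk() and write_out() are hypothetical application I/O helpers; the chunk size is arbitrary:

```c
/* Condensed sketch of streaming frame compression: begin -> update -> end. */
#include <stdlib.h>
#include "lz4frame.h"

#define CHUNK_SIZE (64 * 1024)

extern size_t read_chunk(void* buf, size_t maxBytes);    /* hypothetical */
extern void   write_out(const void* buf, size_t bytes);  /* hypothetical */

int stream_compress_frame(void)
{
    LZ4F_cctx* cctx = NULL;
    const size_t outCap = LZ4F_compressBound(CHUNK_SIZE, NULL);  /* also >= LZ4F_HEADER_SIZE_MAX */
    void* const out = malloc(outCap);
    void* const in  = malloc(CHUNK_SIZE);
    size_t n;
    int ret = -1;

    if (!out || !in) goto cleanup;
    if (LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) goto cleanup;

    n = LZ4F_compressBegin(cctx, out, outCap, NULL);             /* frame header */
    if (LZ4F_isError(n)) goto cleanup;
    write_out(out, n);

    for (;;) {
        const size_t readBytes = read_chunk(in, CHUNK_SIZE);
        if (readBytes == 0) break;
        n = LZ4F_compressUpdate(cctx, out, outCap, in, readBytes, NULL);
        if (LZ4F_isError(n)) goto cleanup;
        write_out(out, n);                                       /* may be 0 (data buffered) */
    }

    n = LZ4F_compressEnd(cctx, out, outCap, NULL);               /* endMark + checksum */
    if (LZ4F_isError(n)) goto cleanup;
    write_out(out, n);
    ret = 0;

cleanup:
    LZ4F_freeCompressionContext(cctx);                           /* fine with NULL */
    free(in); free(out);
    return ret;
}
```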


- -

Decompression functions


-
-
typedef struct {
-  unsigned stableDst;     /* pledges that last 64KB decompressed data will remain available unmodified between invocations.
-                           * This optimization skips storage operations in tmp buffers. */
-  unsigned skipChecksums; /* disable checksum calculation and verification, even when one is present in frame, to save CPU time.
-                           * Setting this option to 1 once disables all checksums for the rest of the frame. */
-  unsigned reserved1;     /* must be set to zero for forward compatibility */
-  unsigned reserved0;     /* idem */
-} LZ4F_decompressOptions_t;
-

-
LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** dctxPtr, unsigned version);
-LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx);
-

Create an LZ4F_dctx object, to track all decompression operations. - @version provided MUST be LZ4F_VERSION. - @dctxPtr MUST be valid. - The function fills @dctxPtr with the value of a pointer to an allocated and initialized LZ4F_dctx object. - The @return is an errorCode, which can be tested using LZ4F_isError(). - dctx memory can be released using LZ4F_freeDecompressionContext(); - Result of LZ4F_freeDecompressionContext() indicates current state of decompressionContext when being released. - That is, it should be == 0 if decompression has been completed fully and correctly. - -


- -

Streaming decompression functions


-
-
size_t LZ4F_headerSize(const void* src, size_t srcSize);
-

Provide the header size of a frame starting at `src`. - `srcSize` must be >= LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH, - which is enough to decode the header length. - @return : size of frame header - or an error code, which can be tested using LZ4F_isError() - note : Frame header size is variable, but is guaranteed to be - >= LZ4F_HEADER_SIZE_MIN bytes, and <= LZ4F_HEADER_SIZE_MAX bytes. - -


- -
size_t
-LZ4F_getFrameInfo(LZ4F_dctx* dctx,
-                  LZ4F_frameInfo_t* frameInfoPtr,
-            const void* srcBuffer, size_t* srcSizePtr);
-

This function extracts frame parameters (max blockSize, dictID, etc.). - Its usage is optional: user can also invoke LZ4F_decompress() directly. - - Extracted information will fill an existing LZ4F_frameInfo_t structure. - This can be useful for allocation and dictionary identification purposes. - - LZ4F_getFrameInfo() can work in the following situations : - - 1) At the beginning of a new frame, before any invocation of LZ4F_decompress(). - It will decode header from `srcBuffer`, - consuming the header and starting the decoding process. - - Input size must be large enough to contain the full frame header. - Frame header size can be known beforehand by LZ4F_headerSize(). - Frame header size is variable, but is guaranteed to be >= LZ4F_HEADER_SIZE_MIN bytes, - and not more than <= LZ4F_HEADER_SIZE_MAX bytes. - Hence, blindly providing LZ4F_HEADER_SIZE_MAX bytes or more will always work. - It's allowed to provide more input data than the header size, - LZ4F_getFrameInfo() will only consume the header. - - If input size is not large enough, - aka if it's smaller than header size, - function will fail and return an error code. - - 2) After decoding has been started, - it's possible to invoke LZ4F_getFrameInfo() anytime - to extract already decoded frame parameters stored within dctx. - - Note that, if decoding has barely started, - and not yet read enough information to decode the header, - LZ4F_getFrameInfo() will fail. - - The number of bytes consumed from srcBuffer will be updated in *srcSizePtr (necessarily <= original value). - LZ4F_getFrameInfo() only consumes bytes when decoding has not yet started, - and when decoding the header has been successful. - Decompression must then resume from (srcBuffer + *srcSizePtr). - - @return : a hint about how many srcSize bytes LZ4F_decompress() expects for next call, - or an error code which can be tested using LZ4F_isError(). - note 1 : in case of error, dctx is not modified. Decoding operation can resume from beginning safely. - note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure. - -


- -
size_t
-LZ4F_decompress(LZ4F_dctx* dctx,
-                void* dstBuffer, size_t* dstSizePtr,
-          const void* srcBuffer, size_t* srcSizePtr,
-          const LZ4F_decompressOptions_t* dOptPtr);
-

Call this function repetitively to regenerate the data compressed in `srcBuffer`.
 The function requires a valid dctx state. It will read up to *srcSizePtr bytes from srcBuffer, and decompress data into dstBuffer, of capacity *dstSizePtr.
 The number of bytes consumed from srcBuffer will be written into *srcSizePtr (necessarily <= original value).
 The number of bytes decompressed into dstBuffer will be written into *dstSizePtr (necessarily <= original value).
 The function does not necessarily read all input bytes, so always check the value in *srcSizePtr. Unconsumed source data must be presented again in subsequent invocations.
 `dstBuffer` can freely change between each consecutive function invocation; its content will be overwritten.
 @return : a hint of how many `srcSize` bytes LZ4F_decompress() expects for the next call. Schematically, it's the size of the current (or remaining) compressed block plus the header of the next block. Respecting the hint provides a small speed benefit, because it skips intermediate buffers. This is just a hint though; it's always possible to provide any srcSize.
 When a frame is fully decoded, @return will be 0 (no more data expected). When provided with more bytes than necessary to decode a frame, LZ4F_decompress() will stop reading exactly at the end of the current frame, and @return 0.
 If decompression failed, @return is an error code, which can be tested using LZ4F_isError(). After a decompression error, the `dctx` context is not resumable. Use LZ4F_resetDecompressionContext() to return to a clean state.
 After a frame is fully decoded, dctx can be used again to decompress another frame.
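A condensed sketch of the decompression loop implied above, feeding arbitrary chunks and draining each one until it is fully consumed. read_chunk() and write_out() are hypothetical application I/O helpers; buffer sizes are arbitrary:

```c
/* Condensed sketch of streaming frame decompression with LZ4F_decompress(). */
#include <stdlib.h>
#include "lz4frame.h"

#define IN_CHUNK  (64 * 1024)
#define OUT_CHUNK (256 * 1024)

extern size_t read_chunk(void* buf, size_t maxBytes);    /* hypothetical */
extern void   write_out(const void* buf, size_t bytes);  /* hypothetical */

int stream_decompress_frame(void)
{
    LZ4F_dctx* dctx = NULL;
    char* const in  = (char*)malloc(IN_CHUNK);
    char* const out = (char*)malloc(OUT_CHUNK);
    size_t hint = 1;   /* non-zero: frame not finished yet */
    int ret = -1;

    if (!in || !out) goto cleanup;
    if (LZ4F_isError(LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION))) goto cleanup;

    while (hint != 0) {
        const size_t readBytes = read_chunk(in, IN_CHUNK);
        size_t consumed = 0;
        if (readBytes == 0) break;                       /* truncated input */

        while (consumed < readBytes && hint != 0) {
            size_t dstSize = OUT_CHUNK;
            size_t srcSize = readBytes - consumed;
            hint = LZ4F_decompress(dctx, out, &dstSize, in + consumed, &srcSize, NULL);
            if (LZ4F_isError(hint)) goto cleanup;
            write_out(out, dstSize);
            consumed += srcSize;                         /* not all input is always consumed */
        }
    }
    ret = (hint == 0) ? 0 : -1;                          /* 0 only when the frame is complete */

cleanup:
    LZ4F_freeDecompressionContext(dctx);                 /* assumed NULL-tolerant, like free() */
    free(in); free(out);
    return ret;
}
```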


- -
void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx);   /* always successful */
-

In case of an error, the context is left in "undefined" state. - In which case, it's necessary to reset it, before re-using it. - This method can also be used to abruptly stop any unfinished decompression, - and start a new one using same context resources. -


- -
typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM)
-              _LZ4F_dummy_error_enum_for_c89_never_used } LZ4F_errorCodes;
-

-
LZ4FLIB_STATIC_API size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID);
-

Return, in scalar format (size_t), - the maximum block size associated with blockSizeID. -


- -
LZ4FLIB_STATIC_API size_t
-LZ4F_uncompressedUpdate(LZ4F_cctx* cctx,
-                        void* dstBuffer, size_t dstCapacity,
-                  const void* srcBuffer, size_t srcSize,
-                  const LZ4F_compressOptions_t* cOptPtr);
-

LZ4F_uncompressedUpdate() can be called repetitively to add as much uncompressed data as necessary.
 Important rule : dstCapacity MUST be large enough to store the entire source buffer, as no compression is done for this operation. If this condition is not respected, LZ4F_uncompressedUpdate() will fail (the result is an error code).
 After an error, the state is left in an undefined state, and must be re-initialized or freed.
 If a compressed block was written previously, buffered data is flushed before uncompressed data is appended.
 This is only supported when LZ4F_blockIndependent is used.
 `cOptPtr` is optional : NULL can be provided, in which case all options are set to default.
 @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered), or an error code if it fails (which can be tested using LZ4F_isError()).


- -

Bulk processing dictionary API


-
-
LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize);
-LZ4FLIB_STATIC_API void        LZ4F_freeCDict(LZ4F_CDict* CDict);
-

When compressing multiple messages / blocks using the same dictionary, it's recommended to load it just once.
 LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
 An LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
 `dictBuffer` can be released after LZ4F_CDict creation, since its content is copied within the CDict.


- -
LZ4FLIB_STATIC_API size_t
-LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
-                              void* dst, size_t dstCapacity,
-                        const void* src, size_t srcSize,
-                        const LZ4F_CDict* cdict,
-                        const LZ4F_preferences_t* preferencesPtr);
-

Compress an entire srcBuffer into a valid LZ4 frame using a digested dictionary.
 cctx must point to a context created by LZ4F_createCompressionContext().
 If cdict == NULL, compress without a dictionary.
 dstCapacity MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). If this condition is not respected, the function will fail (@return an error code).
 The LZ4F_preferences_t structure is optional : you may provide NULL as argument, but it's not recommended, as it's the only way to provide a dictID in the frame header.
 @return : number of bytes written into dstBuffer, or an error code if it fails (can be tested using LZ4F_isError()).


- -
LZ4FLIB_STATIC_API size_t
-LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx,
-                              void* dstBuffer, size_t dstCapacity,
-                        const LZ4F_CDict* cdict,
-                        const LZ4F_preferences_t* prefsPtr);
-

Inits streaming dictionary compression, and writes the frame header into dstBuffer. - dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. - `prefsPtr` is optional : you may provide NULL as argument, - however, it's the only way to provide dictID in the frame header. - @return : number of bytes written into dstBuffer for the header, - or an error code (which can be tested using LZ4F_isError()) -


- -
LZ4FLIB_STATIC_API size_t
-LZ4F_decompress_usingDict(LZ4F_dctx* dctxPtr,
-                          void* dstBuffer, size_t* dstSizePtr,
-                    const void* srcBuffer, size_t* srcSizePtr,
-                    const void* dict, size_t dictSize,
-                    const LZ4F_decompressOptions_t* decompressOptionsPtr);
-

Same as LZ4F_decompress(), using a predefined dictionary. - Dictionary is used "in place", without any preprocessing. - It must remain accessible throughout the entire frame decoding. -


- -
typedef void* (*LZ4F_AllocFunction) (void* opaqueState, size_t size);
-typedef void* (*LZ4F_CallocFunction) (void* opaqueState, size_t size);
-typedef void  (*LZ4F_FreeFunction) (void* opaqueState, void* address);
-typedef struct {
-    LZ4F_AllocFunction customAlloc;
-    LZ4F_CallocFunction customCalloc; /* optional; when not defined, uses customAlloc + memset */
-    LZ4F_FreeFunction customFree;
-    void* opaqueState;
-} LZ4F_CustomMem;
-static
-#ifdef __GNUC__
-__attribute__((__unused__))
-#endif
-LZ4F_CustomMem const LZ4F_defaultCMem = { NULL, NULL, NULL, NULL };  /**< this constant defers to stdlib's functions */
-

These prototypes make it possible to pass custom allocation/free functions. - LZ4F_customMem is provided at state creation time, using LZ4F_create*_advanced() listed below. - All allocation/free operations will be completed using these custom variants instead of regular ones. - -


- - - diff --git a/librocksdb-sys/lz4/examples/.gitignore b/librocksdb-sys/lz4/examples/.gitignore deleted file mode 100644 index ddc8e21..0000000 --- a/librocksdb-sys/lz4/examples/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -/Makefile.lz4* -/printVersion -/doubleBuffer -/dictionaryRandomAccess -/ringBuffer -/ringBufferHC -/lineCompress -/frameCompress -/fileCompress -/simpleBuffer -/*.exe diff --git a/librocksdb-sys/lz4/examples/HCStreaming_ringBuffer.c b/librocksdb-sys/lz4/examples/HCStreaming_ringBuffer.c deleted file mode 100644 index bc8391e..0000000 --- a/librocksdb-sys/lz4/examples/HCStreaming_ringBuffer.c +++ /dev/null @@ -1,232 +0,0 @@ -// LZ4 HC streaming API example : ring buffer -// Based on a previous example by Takayuki Matsuoka - - -/************************************** - * Compiler Options - **************************************/ -#if defined(_MSC_VER) && (_MSC_VER <= 1800) /* Visual Studio <= 2013 */ -# define _CRT_SECURE_NO_WARNINGS -# define snprintf sprintf_s -#endif - -#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) -#ifdef __GNUC__ -# pragma GCC diagnostic ignored "-Wmissing-braces" /* GCC bug 53119 : doesn't accept { 0 } as initializer (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53119) */ -#endif - - -/************************************** - * Includes - **************************************/ -#include "lz4hc.h" -#include "lz4.h" - -#include -#include -#include -#include -#include - -enum { - MESSAGE_MAX_BYTES = 1024, - RING_BUFFER_BYTES = 1024 * 8 + MESSAGE_MAX_BYTES, - DEC_BUFFER_BYTES = RING_BUFFER_BYTES + MESSAGE_MAX_BYTES // Intentionally larger to test unsynchronized ring buffers -}; - - -size_t write_int32(FILE* fp, int32_t i) { - return fwrite(&i, sizeof(i), 1, fp); -} - -size_t write_bin(FILE* fp, const void* array, int arrayBytes) { - assert(arrayBytes >= 0); - return fwrite(array, 1, (size_t)arrayBytes, fp); -} - -size_t read_int32(FILE* fp, int32_t* i) { - return fread(i, sizeof(*i), 1, fp); -} - -size_t read_bin(FILE* fp, void* array, int arrayBytes) { - assert(arrayBytes >= 0); - return fread(array, 1, (size_t)arrayBytes, fp); -} - - -void test_compress(FILE* outFp, FILE* inpFp) -{ - LZ4_streamHC_t lz4Stream_body = { 0 }; - LZ4_streamHC_t* lz4Stream = &lz4Stream_body; - - static char inpBuf[RING_BUFFER_BYTES]; - int inpOffset = 0; - - for(;;) { - // Read random length ([1,MESSAGE_MAX_BYTES]) data to the ring buffer. 
- char* const inpPtr = &inpBuf[inpOffset]; - const int randomLength = (rand() % MESSAGE_MAX_BYTES) + 1; - const int inpBytes = (int) read_bin(inpFp, inpPtr, randomLength); - if (0 == inpBytes) break; - -#define CMPBUFSIZE (LZ4_COMPRESSBOUND(MESSAGE_MAX_BYTES)) - { char cmpBuf[CMPBUFSIZE]; - const int cmpBytes = LZ4_compress_HC_continue(lz4Stream, inpPtr, cmpBuf, inpBytes, CMPBUFSIZE); - - if(cmpBytes <= 0) break; - write_int32(outFp, cmpBytes); - write_bin(outFp, cmpBuf, cmpBytes); - - inpOffset += inpBytes; - - // Wraparound the ringbuffer offset - if(inpOffset >= RING_BUFFER_BYTES - MESSAGE_MAX_BYTES) - inpOffset = 0; - } - } - - write_int32(outFp, 0); -} - - -void test_decompress(FILE* outFp, FILE* inpFp) -{ - static char decBuf[DEC_BUFFER_BYTES]; - int decOffset = 0; - LZ4_streamDecode_t lz4StreamDecode_body = { 0 }; - LZ4_streamDecode_t* lz4StreamDecode = &lz4StreamDecode_body; - - for(;;) { - int cmpBytes = 0; - char cmpBuf[CMPBUFSIZE]; - - { const size_t r0 = read_int32(inpFp, &cmpBytes); - size_t r1; - if(r0 != 1 || cmpBytes <= 0) - break; - - r1 = read_bin(inpFp, cmpBuf, cmpBytes); - if(r1 != (size_t) cmpBytes) - break; - } - - { char* const decPtr = &decBuf[decOffset]; - const int decBytes = LZ4_decompress_safe_continue( - lz4StreamDecode, cmpBuf, decPtr, cmpBytes, MESSAGE_MAX_BYTES); - if(decBytes <= 0) - break; - - decOffset += decBytes; - write_bin(outFp, decPtr, decBytes); - - // Wraparound the ringbuffer offset - if(decOffset >= DEC_BUFFER_BYTES - MESSAGE_MAX_BYTES) - decOffset = 0; - } - } -} - - -// Compare 2 files content -// return 0 if identical -// return ByteNb>0 if different -size_t compare(FILE* f0, FILE* f1) -{ - size_t result = 1; - - for (;;) { - char b0[65536]; - char b1[65536]; - const size_t r0 = fread(b0, 1, sizeof(b0), f0); - const size_t r1 = fread(b1, 1, sizeof(b1), f1); - - if ((r0==0) && (r1==0)) return 0; // success - - if (r0 != r1) { - size_t smallest = r0; - if (r1 -#include -#include -#include - -enum { - BLOCK_BYTES = 1024 * 8, -// BLOCK_BYTES = 1024 * 64, -}; - - -size_t write_int(FILE* fp, int i) { - return fwrite(&i, sizeof(i), 1, fp); -} - -size_t write_bin(FILE* fp, const void* array, size_t arrayBytes) { - return fwrite(array, 1, arrayBytes, fp); -} - -size_t read_int(FILE* fp, int* i) { - return fread(i, sizeof(*i), 1, fp); -} - -size_t read_bin(FILE* fp, void* array, size_t arrayBytes) { - return fread(array, 1, arrayBytes, fp); -} - - -void test_compress(FILE* outFp, FILE* inpFp) -{ - LZ4_stream_t lz4Stream_body; - LZ4_stream_t* lz4Stream = &lz4Stream_body; - - char inpBuf[2][BLOCK_BYTES]; - int inpBufIndex = 0; - - LZ4_initStream(lz4Stream, sizeof (*lz4Stream)); - - for(;;) { - char* const inpPtr = inpBuf[inpBufIndex]; - const int inpBytes = (int) read_bin(inpFp, inpPtr, BLOCK_BYTES); - if(0 == inpBytes) { - break; - } - - { - char cmpBuf[LZ4_COMPRESSBOUND(BLOCK_BYTES)]; - const int cmpBytes = LZ4_compress_fast_continue( - lz4Stream, inpPtr, cmpBuf, inpBytes, sizeof(cmpBuf), 1); - if(cmpBytes <= 0) { - break; - } - write_int(outFp, cmpBytes); - write_bin(outFp, cmpBuf, (size_t) cmpBytes); - } - - inpBufIndex = (inpBufIndex + 1) % 2; - } - - write_int(outFp, 0); -} - - -void test_decompress(FILE* outFp, FILE* inpFp) -{ - LZ4_streamDecode_t lz4StreamDecode_body; - LZ4_streamDecode_t* lz4StreamDecode = &lz4StreamDecode_body; - - char decBuf[2][BLOCK_BYTES]; - int decBufIndex = 0; - - LZ4_setStreamDecode(lz4StreamDecode, NULL, 0); - - for(;;) { - char cmpBuf[LZ4_COMPRESSBOUND(BLOCK_BYTES)]; - int cmpBytes = 0; - - { - const size_t readCount0 
= read_int(inpFp, &cmpBytes); - if(readCount0 != 1 || cmpBytes <= 0) { - break; - } - - const size_t readCount1 = read_bin(inpFp, cmpBuf, (size_t) cmpBytes); - if(readCount1 != (size_t) cmpBytes) { - break; - } - } - - { - char* const decPtr = decBuf[decBufIndex]; - const int decBytes = LZ4_decompress_safe_continue( - lz4StreamDecode, cmpBuf, decPtr, cmpBytes, BLOCK_BYTES); - if(decBytes <= 0) { - break; - } - write_bin(outFp, decPtr, (size_t) decBytes); - } - - decBufIndex = (decBufIndex + 1) % 2; - } -} - - -int compare(FILE* fp0, FILE* fp1) -{ - int result = 0; - - while(0 == result) { - char b0[65536]; - char b1[65536]; - const size_t r0 = read_bin(fp0, b0, sizeof(b0)); - const size_t r1 = read_bin(fp1, b1, sizeof(b1)); - - result = (int) r0 - (int) r1; - - if(0 == r0 || 0 == r1) { - break; - } - if(0 == result) { - result = memcmp(b0, b1, r0); - } - } - - return result; -} - - -int main(int argc, char* argv[]) -{ - char inpFilename[256] = { 0 }; - char lz4Filename[256] = { 0 }; - char decFilename[256] = { 0 }; - - if(argc < 2) { - printf("Please specify input filename\n"); - return 0; - } - - snprintf(inpFilename, 256, "%s", argv[1]); - snprintf(lz4Filename, 256, "%s.lz4s-%d", argv[1], BLOCK_BYTES); - snprintf(decFilename, 256, "%s.lz4s-%d.dec", argv[1], BLOCK_BYTES); - - printf("inp = [%s]\n", inpFilename); - printf("lz4 = [%s]\n", lz4Filename); - printf("dec = [%s]\n", decFilename); - - // compress - { - FILE* inpFp = fopen(inpFilename, "rb"); - FILE* outFp = fopen(lz4Filename, "wb"); - - printf("compress : %s -> %s\n", inpFilename, lz4Filename); - test_compress(outFp, inpFp); - printf("compress : done\n"); - - fclose(outFp); - fclose(inpFp); - } - - // decompress - { - FILE* inpFp = fopen(lz4Filename, "rb"); - FILE* outFp = fopen(decFilename, "wb"); - - printf("decompress : %s -> %s\n", lz4Filename, decFilename); - test_decompress(outFp, inpFp); - printf("decompress : done\n"); - - fclose(outFp); - fclose(inpFp); - } - - // verify - { - FILE* inpFp = fopen(inpFilename, "rb"); - FILE* decFp = fopen(decFilename, "rb"); - - printf("verify : %s <-> %s\n", inpFilename, decFilename); - const int cmp = compare(inpFp, decFp); - if(0 == cmp) { - printf("verify : OK\n"); - } else { - printf("verify : NG\n"); - } - - fclose(decFp); - fclose(inpFp); - } - - return 0; -} diff --git a/librocksdb-sys/lz4/examples/blockStreaming_doubleBuffer.md b/librocksdb-sys/lz4/examples/blockStreaming_doubleBuffer.md deleted file mode 100644 index 38dc2e8..0000000 --- a/librocksdb-sys/lz4/examples/blockStreaming_doubleBuffer.md +++ /dev/null @@ -1,100 +0,0 @@ -# LZ4 Streaming API Example : Double Buffer -by *Takayuki Matsuoka* - -`blockStreaming_doubleBuffer.c` is LZ4 Streaming API example which implements double buffer (de)compression. - -Please note : - - - Firstly, read "LZ4 Streaming API Basics". - - This is relatively advanced application example. - - Output file is not compatible with lz4frame and platform dependent. - - -## What's the point of this example ? - - - Handle huge file in small amount of memory - - Always better compression ratio than Block API - - Uniform block size - - -## How the compression works - -First of all, allocate "Double Buffer" for input and LZ4 compressed data buffer for output. -Double buffer has two pages, "first" page (Page#1) and "second" page (Page#2). 
- -``` - Double Buffer - - Page#1 Page#2 - +---------+---------+ - | Block#1 | | - +----+----+---------+ - | - v - {Out#1} - - - Prefix Dependency - +---------+ - | | - v | - +---------+----+----+ - | Block#1 | Block#2 | - +---------+----+----+ - | - v - {Out#2} - - - External Dictionary Mode - +---------+ - | | - | v - +----+----+---------+ - | Block#3 | Block#2 | - +----+----+---------+ - | - v - {Out#3} - - - Prefix Dependency - +---------+ - | | - v | - +---------+----+----+ - | Block#3 | Block#4 | - +---------+----+----+ - | - v - {Out#4} -``` - -Next, read first block to double buffer's first page. And compress it by `LZ4_compress_continue()`. -For the first time, LZ4 doesn't know any previous dependencies, -so it just compress the line without dependencies and generates compressed block {Out#1} to LZ4 compressed data buffer. -After that, write {Out#1} to the file. - -Next, read second block to double buffer's second page. And compress it. -This time, LZ4 can use dependency to Block#1 to improve compression ratio. -This dependency is called "Prefix mode". - -Next, read third block to double buffer's *first* page, and compress it. -Also this time, LZ4 can use dependency to Block#2. -This dependency is called "External Dictonaly mode". - -Continue these procedure to the end of the file. - - -## How the decompression works - -Decompression will do reverse order. - - - Read first compressed block. - - Decompress it to the first page and write that page to the file. - - Read second compressed block. - - Decompress it to the second page and write that page to the file. - - Read third compressed block. - - Decompress it to the *first* page and write that page to the file. - -Continue these procedure to the end of the compressed file. diff --git a/librocksdb-sys/lz4/examples/blockStreaming_lineByLine.c b/librocksdb-sys/lz4/examples/blockStreaming_lineByLine.c deleted file mode 100644 index 3047a3a..0000000 --- a/librocksdb-sys/lz4/examples/blockStreaming_lineByLine.c +++ /dev/null @@ -1,211 +0,0 @@ -// LZ4 streaming API example : line-by-line logfile compression -// by Takayuki Matsuoka - - -#if defined(_MSC_VER) && (_MSC_VER <= 1800) /* Visual Studio <= 2013 */ -# define _CRT_SECURE_NO_WARNINGS -# define snprintf sprintf_s -#endif -#include "lz4.h" - -#include -#include -#include -#include - -static size_t write_uint16(FILE* fp, uint16_t i) -{ - return fwrite(&i, sizeof(i), 1, fp); -} - -static size_t write_bin(FILE* fp, const void* array, int arrayBytes) -{ - return fwrite(array, 1, arrayBytes, fp); -} - -static size_t read_uint16(FILE* fp, uint16_t* i) -{ - return fread(i, sizeof(*i), 1, fp); -} - -static size_t read_bin(FILE* fp, void* array, int arrayBytes) -{ - return fread(array, 1, arrayBytes, fp); -} - - -static void test_compress( - FILE* outFp, - FILE* inpFp, - size_t messageMaxBytes, - size_t ringBufferBytes) -{ - LZ4_stream_t* const lz4Stream = LZ4_createStream(); - const size_t cmpBufBytes = LZ4_COMPRESSBOUND(messageMaxBytes); - char* const cmpBuf = (char*) malloc(cmpBufBytes); - char* const inpBuf = (char*) malloc(ringBufferBytes); - int inpOffset = 0; - - for ( ; ; ) - { - char* const inpPtr = &inpBuf[inpOffset]; - -#if 0 - // Read random length data to the ring buffer. - const int randomLength = (rand() % messageMaxBytes) + 1; - const int inpBytes = (int) read_bin(inpFp, inpPtr, randomLength); - if (0 == inpBytes) break; -#else - // Read line to the ring buffer. 
- int inpBytes = 0; - if (!fgets(inpPtr, (int) messageMaxBytes, inpFp)) - break; - inpBytes = (int) strlen(inpPtr); -#endif - - { - const int cmpBytes = LZ4_compress_fast_continue( - lz4Stream, inpPtr, cmpBuf, inpBytes, (int) cmpBufBytes, 1); - if (cmpBytes <= 0) break; - write_uint16(outFp, (uint16_t) cmpBytes); - write_bin(outFp, cmpBuf, cmpBytes); - - // Add and wraparound the ringbuffer offset - inpOffset += inpBytes; - if ((size_t)inpOffset >= ringBufferBytes - messageMaxBytes) inpOffset = 0; - } - } - write_uint16(outFp, 0); - - free(inpBuf); - free(cmpBuf); - LZ4_freeStream(lz4Stream); -} - - -static void test_decompress( - FILE* outFp, - FILE* inpFp, - size_t messageMaxBytes, - size_t ringBufferBytes) -{ - LZ4_streamDecode_t* const lz4StreamDecode = LZ4_createStreamDecode(); - char* const cmpBuf = (char*) malloc(LZ4_COMPRESSBOUND(messageMaxBytes)); - char* const decBuf = (char*) malloc(ringBufferBytes); - int decOffset = 0; - - for ( ; ; ) - { - uint16_t cmpBytes = 0; - - if (read_uint16(inpFp, &cmpBytes) != 1) break; - if (cmpBytes == 0) break; - if (read_bin(inpFp, cmpBuf, cmpBytes) != cmpBytes) break; - - { - char* const decPtr = &decBuf[decOffset]; - const int decBytes = LZ4_decompress_safe_continue( - lz4StreamDecode, cmpBuf, decPtr, cmpBytes, (int) messageMaxBytes); - if (decBytes <= 0) break; - write_bin(outFp, decPtr, decBytes); - - // Add and wraparound the ringbuffer offset - decOffset += decBytes; - if ((size_t)decOffset >= ringBufferBytes - messageMaxBytes) decOffset = 0; - } - } - - free(decBuf); - free(cmpBuf); - LZ4_freeStreamDecode(lz4StreamDecode); -} - - -static int compare(FILE* f0, FILE* f1) -{ - int result = 0; - const size_t tempBufferBytes = 65536; - char* const b0 = (char*) malloc(tempBufferBytes); - char* const b1 = (char*) malloc(tempBufferBytes); - - while(0 == result) - { - const size_t r0 = fread(b0, 1, tempBufferBytes, f0); - const size_t r1 = fread(b1, 1, tempBufferBytes, f1); - - result = (int) r0 - (int) r1; - - if (0 == r0 || 0 == r1) break; - if (0 == result) result = memcmp(b0, b1, r0); - } - - free(b1); - free(b0); - return result; -} - - -int main(int argc, char* argv[]) -{ - enum { - MESSAGE_MAX_BYTES = 1024, - RING_BUFFER_BYTES = 1024 * 256 + MESSAGE_MAX_BYTES, - }; - - char inpFilename[256] = { 0 }; - char lz4Filename[256] = { 0 }; - char decFilename[256] = { 0 }; - - if (argc < 2) - { - printf("Please specify input filename\n"); - return 0; - } - - snprintf(inpFilename, 256, "%s", argv[1]); - snprintf(lz4Filename, 256, "%s.lz4s", argv[1]); - snprintf(decFilename, 256, "%s.lz4s.dec", argv[1]); - - printf("inp = [%s]\n", inpFilename); - printf("lz4 = [%s]\n", lz4Filename); - printf("dec = [%s]\n", decFilename); - - // compress - { - FILE* inpFp = fopen(inpFilename, "rb"); - FILE* outFp = fopen(lz4Filename, "wb"); - - test_compress(outFp, inpFp, MESSAGE_MAX_BYTES, RING_BUFFER_BYTES); - - fclose(outFp); - fclose(inpFp); - } - - // decompress - { - FILE* inpFp = fopen(lz4Filename, "rb"); - FILE* outFp = fopen(decFilename, "wb"); - - test_decompress(outFp, inpFp, MESSAGE_MAX_BYTES, RING_BUFFER_BYTES); - - fclose(outFp); - fclose(inpFp); - } - - // verify - { - FILE* inpFp = fopen(inpFilename, "rb"); - FILE* decFp = fopen(decFilename, "rb"); - - const int cmp = compare(inpFp, decFp); - if (0 == cmp) - printf("Verify : OK\n"); - else - printf("Verify : NG\n"); - - fclose(decFp); - fclose(inpFp); - } - - return 0; -} diff --git a/librocksdb-sys/lz4/examples/blockStreaming_lineByLine.md b/librocksdb-sys/lz4/examples/blockStreaming_lineByLine.md 
deleted file mode 100644 index 7b66883..0000000 --- a/librocksdb-sys/lz4/examples/blockStreaming_lineByLine.md +++ /dev/null @@ -1,122 +0,0 @@ -# LZ4 Streaming API Example : Line by Line Text Compression -by *Takayuki Matsuoka* - -`blockStreaming_lineByLine.c` is LZ4 Streaming API example which implements line by line incremental (de)compression. - -Please note the following restrictions : - - - Firstly, read "LZ4 Streaming API Basics". - - This is relatively advanced application example. - - Output file is not compatible with lz4frame and platform dependent. - - -## What's the point of this example ? - - - Line by line incremental (de)compression. - - Handle huge file in small amount of memory - - Generally better compression ratio than Block API - - Non-uniform block size - - -## How the compression works - -First of all, allocate "Ring Buffer" for input and LZ4 compressed data buffer for output. - -``` -(1) - Ring Buffer - - +--------+ - | Line#1 | - +---+----+ - | - v - {Out#1} - - -(2) - Prefix Mode Dependency - +----+ - | | - v | - +--------+-+------+ - | Line#1 | Line#2 | - +--------+---+----+ - | - v - {Out#2} - - -(3) - Prefix Prefix - +----+ +----+ - | | | | - v | v | - +--------+-+------+-+------+ - | Line#1 | Line#2 | Line#3 | - +--------+--------+---+----+ - | - v - {Out#3} - - -(4) - External Dictionary Mode - +----+ +----+ - | | | | - v | v | - ------+--------+-+------+-+--------+ - | .... | Line#X | Line#X+1 | - ------+--------+--------+-----+----+ - ^ | - | v - | {Out#X+1} - | - Reset - - -(5) - Prefix - +-----+ - | | - v | - ------+--------+--------+----------+--+-------+ - | .... | Line#X | Line#X+1 | Line#X+2 | - ------+--------+--------+----------+-----+----+ - ^ | - | v - | {Out#X+2} - | - Reset -``` - -Next (see (1)), read first line to ringbuffer and compress it by `LZ4_compress_continue()`. -For the first time, LZ4 doesn't know any previous dependencies, -so it just compress the line without dependencies and generates compressed line {Out#1} to LZ4 compressed data buffer. -After that, write {Out#1} to the file and forward ringbuffer offset. - -Do the same things to second line (see (2)). -But in this time, LZ4 can use dependency to Line#1 to improve compression ratio. -This dependency is called "Prefix mode". - -Eventually, we'll reach end of ringbuffer at Line#X (see (4)). -This time, we should reset ringbuffer offset. -After resetting, at Line#X+1 pointer is not adjacent, but LZ4 still maintain its memory. -This is called "External Dictionary Mode". - -In Line#X+2 (see (5)), finally LZ4 forget almost all memories but still remains Line#X+1. -This is the same situation as Line#2. - -Continue these procedures to the end of text file. - - -## How the decompression works - -Decompression will do reverse order. - - - Read compressed line from the file to buffer. - - Decompress it to the ringbuffer. - - Output decompressed plain text line to the file. - - Forward ringbuffer offset. If offset exceeds end of the ringbuffer, reset it. - -Continue these procedures to the end of the compressed file. 
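
Editor's note: the streaming flow described in the two deleted examples above is easier to see end to end in memory, without the file plumbing. The following is a minimal, self-contained sketch (not part of the deleted examples) of the same prefix / external-dictionary behaviour, assuming only the public lz4.h streaming API; the message contents and buffer sizes are illustrative. Two short messages are appended to one history buffer and compressed with LZ4_compress_fast_continue(), so the second block may reference the first, then both are decoded in order with LZ4_decompress_safe_continue().

```
/* Editor's sketch: in-memory round trip of two dependent streaming blocks. */
#include "lz4.h"
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char* msgs[2] = { "hello hello hello ", "hello hello world " };
    char ring[256];                       /* shared history for the encoder       */
    char dec[256];                        /* decoder keeps its own decoded history */
    char cmp[2][LZ4_COMPRESSBOUND(64)];   /* worst-case room per compressed block  */
    int  cmpBytes[2];
    int  offset = 0;

    LZ4_stream_t enc;
    LZ4_streamDecode_t decCtx;
    LZ4_initStream(&enc, sizeof(enc));
    LZ4_setStreamDecode(&decCtx, NULL, 0);

    /* Compress: each message is appended to the ring buffer, so block #2 can
     * use block #1 as its prefix dictionary. */
    for (int i = 0; i < 2; i++) {
        const int len = (int)strlen(msgs[i]);
        memcpy(&ring[offset], msgs[i], (size_t)len);
        cmpBytes[i] = LZ4_compress_fast_continue(&enc, &ring[offset], cmp[i],
                                                 len, (int)sizeof(cmp[i]), 1);
        if (cmpBytes[i] <= 0) return 1;
        offset += len;
    }

    /* Decompress in the same order; decoded data stays contiguous, so the
     * previous block remains available as history for the next one. */
    offset = 0;
    for (int i = 0; i < 2; i++) {
        const int decBytes = LZ4_decompress_safe_continue(&decCtx, cmp[i],
                                                          &dec[offset], cmpBytes[i],
                                                          (int)sizeof(dec) - offset);
        if (decBytes <= 0) return 1;
        printf("block #%d : %d compressed -> %d decompressed bytes\n",
               i + 1, cmpBytes[i], decBytes);
        offset += decBytes;
    }
    return 0;
}
```
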
diff --git a/librocksdb-sys/lz4/examples/blockStreaming_ringBuffer.c b/librocksdb-sys/lz4/examples/blockStreaming_ringBuffer.c deleted file mode 100644 index 0b6a3ce..0000000 --- a/librocksdb-sys/lz4/examples/blockStreaming_ringBuffer.c +++ /dev/null @@ -1,190 +0,0 @@ -/* LZ4 streaming API example : ring buffer - * Based on sample code from Takayuki Matsuoka */ - - -/************************************** - * Compiler Options - **************************************/ -#if defined(_MSC_VER) && (_MSC_VER <= 1800) /* Visual Studio <= 2013 */ -# define _CRT_SECURE_NO_WARNINGS -# define snprintf sprintf_s -#endif - - -/************************************** - * Includes - **************************************/ -#include -#include -#include -#include -#include "lz4.h" - - -enum { - MESSAGE_MAX_BYTES = 1024, - RING_BUFFER_BYTES = 1024 * 8 + MESSAGE_MAX_BYTES, - DECODE_RING_BUFFER = RING_BUFFER_BYTES + MESSAGE_MAX_BYTES /* Intentionally larger, to test unsynchronized ring buffers */ -}; - - -size_t write_int32(FILE* fp, int32_t i) { - return fwrite(&i, sizeof(i), 1, fp); -} - -size_t write_bin(FILE* fp, const void* array, int arrayBytes) { - return fwrite(array, 1, arrayBytes, fp); -} - -size_t read_int32(FILE* fp, int32_t* i) { - return fread(i, sizeof(*i), 1, fp); -} - -size_t read_bin(FILE* fp, void* array, int arrayBytes) { - return fread(array, 1, arrayBytes, fp); -} - - -void test_compress(FILE* outFp, FILE* inpFp) -{ - LZ4_stream_t lz4Stream_body = { { 0 } }; - LZ4_stream_t* lz4Stream = &lz4Stream_body; - - static char inpBuf[RING_BUFFER_BYTES]; - int inpOffset = 0; - - for(;;) { - // Read random length ([1,MESSAGE_MAX_BYTES]) data to the ring buffer. - char* const inpPtr = &inpBuf[inpOffset]; - const int randomLength = (rand() % MESSAGE_MAX_BYTES) + 1; - const int inpBytes = (int) read_bin(inpFp, inpPtr, randomLength); - if (0 == inpBytes) break; - - { -#define CMPBUFSIZE (LZ4_COMPRESSBOUND(MESSAGE_MAX_BYTES)) - char cmpBuf[CMPBUFSIZE]; - const int cmpBytes = LZ4_compress_fast_continue(lz4Stream, inpPtr, cmpBuf, inpBytes, CMPBUFSIZE, 0); - if(cmpBytes <= 0) break; - write_int32(outFp, cmpBytes); - write_bin(outFp, cmpBuf, cmpBytes); - - inpOffset += inpBytes; - - // Wraparound the ringbuffer offset - if(inpOffset >= RING_BUFFER_BYTES - MESSAGE_MAX_BYTES) inpOffset = 0; - } - } - - write_int32(outFp, 0); -} - - -void test_decompress(FILE* outFp, FILE* inpFp) -{ - static char decBuf[DECODE_RING_BUFFER]; - int decOffset = 0; - LZ4_streamDecode_t lz4StreamDecode_body = { { 0 } }; - LZ4_streamDecode_t* lz4StreamDecode = &lz4StreamDecode_body; - - for(;;) { - int cmpBytes = 0; - char cmpBuf[CMPBUFSIZE]; - - { const size_t r0 = read_int32(inpFp, &cmpBytes); - if(r0 != 1 || cmpBytes <= 0) break; - - const size_t r1 = read_bin(inpFp, cmpBuf, cmpBytes); - if(r1 != (size_t) cmpBytes) break; - } - - { char* const decPtr = &decBuf[decOffset]; - const int decBytes = LZ4_decompress_safe_continue( - lz4StreamDecode, cmpBuf, decPtr, cmpBytes, MESSAGE_MAX_BYTES); - if(decBytes <= 0) break; - decOffset += decBytes; - write_bin(outFp, decPtr, decBytes); - - // Wraparound the ringbuffer offset - if(decOffset >= DECODE_RING_BUFFER - MESSAGE_MAX_BYTES) decOffset = 0; - } - } -} - - -int compare(FILE* f0, FILE* f1) -{ - int result = 0; - - while (0 == result) { - char b0[65536]; - char b1[65536]; - const size_t r0 = fread(b0, 1, sizeof(b0), f0); - const size_t r1 = fread(b1, 1, sizeof(b1), f1); - - result = (int) r0 - (int) r1; - - if (0 == r0 || 0 == r1) break; - - if (0 == result) result = memcmp(b0, b1, r0); 
- } - - return result; -} - - -int main(int argc, char** argv) -{ - char inpFilename[256] = { 0 }; - char lz4Filename[256] = { 0 }; - char decFilename[256] = { 0 }; - - if (argc < 2) { - printf("Please specify input filename\n"); - return 0; - } - - snprintf(inpFilename, 256, "%s", argv[1]); - snprintf(lz4Filename, 256, "%s.lz4s-%d", argv[1], 0); - snprintf(decFilename, 256, "%s.lz4s-%d.dec", argv[1], 0); - - printf("inp = [%s]\n", inpFilename); - printf("lz4 = [%s]\n", lz4Filename); - printf("dec = [%s]\n", decFilename); - - // compress - { FILE* const inpFp = fopen(inpFilename, "rb"); - FILE* const outFp = fopen(lz4Filename, "wb"); - - test_compress(outFp, inpFp); - - fclose(outFp); - fclose(inpFp); - } - - // decompress - { FILE* const inpFp = fopen(lz4Filename, "rb"); - FILE* const outFp = fopen(decFilename, "wb"); - - test_decompress(outFp, inpFp); - - fclose(outFp); - fclose(inpFp); - } - - // verify - { FILE* const inpFp = fopen(inpFilename, "rb"); - FILE* const decFp = fopen(decFilename, "rb"); - - const int cmp = compare(inpFp, decFp); - if (0 == cmp) { - printf("Verify : OK\n"); - } else { - printf("Verify : NG\n"); - } - - fclose(decFp); - fclose(inpFp); - } - - return 0; -} diff --git a/librocksdb-sys/lz4/examples/compress_functions.c b/librocksdb-sys/lz4/examples/compress_functions.c deleted file mode 100644 index 2a9d124..0000000 --- a/librocksdb-sys/lz4/examples/compress_functions.c +++ /dev/null @@ -1,363 +0,0 @@ -/* - * compress_functions.c - * Copyright : Kyle Harper - * License : Follows same licensing as the lz4.c/lz4.h program at any given time. Currently, BSD 2. - * Description: A program to demonstrate the various compression functions involved in when using LZ4_compress_default(). The idea - * is to show how each step in the call stack can be used directly, if desired. There is also some benchmarking for - * each function to demonstrate the (probably lack of) performance difference when jumping the stack. - * (If you're new to lz4, please read simple_buffer.c to understand the fundamentals) - * - * The call stack (before theoretical compiler optimizations) for LZ4_compress_default is as follows: - * LZ4_compress_default - * LZ4_compress_fast - * LZ4_compress_fast_extState - * LZ4_compress_generic - * - * LZ4_compress_default() - * This is the recommended function for compressing data. It will serve as the baseline for comparison. - * LZ4_compress_fast() - * Despite its name, it's not a "fast" version of compression. It simply decides if HEAPMODE is set and either - * allocates memory on the heap for a struct or creates the struct directly on the stack. Stack access is generally - * faster but this function itself isn't giving that advantage, it's just some logic for compile time. - * LZ4_compress_fast_extState() - * This simply accepts all the pointers and values collected thus far and adds logic to determine how - * LZ4_compress_generic should be invoked; specifically: can the source fit into a single pass as determined by - * LZ4_64Klimit. - * LZ4_compress_generic() - * As the name suggests, this is the generic function that ultimately does most of the heavy lifting. Calling this - * directly can help avoid some test cases and branching which might be useful in some implementation-specific - * situations, but you really need to know what you're doing AND what you're asking lz4 to do! You also need a - * wrapper function because this function isn't exposed with lz4.h. - * - * The call stack for decompression functions is shallow. 
There are 2 options: - * LZ4_decompress_safe || LZ4_decompress_fast - * LZ4_decompress_generic - * - * LZ4_decompress_safe - * This is the recommended function for decompressing data. It is considered safe because the caller specifies - * both the size of the compressed buffer to read as well as the maximum size of the output (decompressed) buffer - * instead of just the latter. - * LZ4_decompress_fast - * Again, despite its name it's not a "fast" version of decompression. It simply frees the caller of sending the - * size of the compressed buffer (it will simply be read-to-end, hence it's non-safety). - * LZ4_decompress_generic - * This is the generic function that both of the LZ4_decompress_* functions above end up calling. Calling this - * directly is not advised, period. Furthermore, it is a static inline function in lz4.c, so there isn't a symbol - * exposed for anyone using lz4.h to utilize. - * - * Special Note About Decompression: - * Using the LZ4_decompress_safe() function protects against malicious (user) input. If you are using data from a - * trusted source, or if your program is the producer (P) as well as its consumer (C) in a PC or MPMC setup, you can - * safely use the LZ4_decompress_fast function. - */ - -/* Since lz4 compiles with c99 and not gnu/std99 we need to enable POSIX linking for time.h structs and functions. */ -#if __STDC_VERSION__ >= 199901L -#define _XOPEN_SOURCE 600 -#else -#define _XOPEN_SOURCE 500 -#endif -#define _POSIX_C_SOURCE 199309L - -/* Includes, for Power! */ -#define LZ4_DISABLE_DEPRECATE_WARNINGS /* LZ4_decompress_fast */ -#include "lz4.h" -#include /* for printf() */ -#include /* for exit() */ -#include /* for atoi() memcmp() */ -#include /* for uint_types */ -#include /* for PRIu64 */ -#include /* for clock_gettime() */ -#include /* for setlocale() */ - -/* We need to know what one billion is for clock timing. */ -#define BILLION 1000000000L - -/* Create a crude set of test IDs so we can switch on them later (Can't switch() on a char[] or char*). */ -#define ID__LZ4_COMPRESS_DEFAULT 1 -#define ID__LZ4_COMPRESS_FAST 2 -#define ID__LZ4_COMPRESS_FAST_EXTSTATE 3 -#define ID__LZ4_COMPRESS_GENERIC 4 -#define ID__LZ4_DECOMPRESS_SAFE 5 -#define ID__LZ4_DECOMPRESS_FAST 6 - - - -/* - * Easy show-error-and-bail function. - */ -void run_screaming(const char *message, const int code) { - printf("%s\n", message); - exit(code); -} - - -/* - * Centralize the usage function to keep main cleaner. - */ -void usage(const char *message) { - printf("Usage: ./argPerformanceTesting \n"); - run_screaming(message, 1); - return; -} - - - -/* - * Runs the benchmark for LZ4_compress_* based on function_id. - */ -uint64_t bench( - const char *known_good_dst, - const int function_id, - const int iterations, - const char *src, - char *dst, - const size_t src_size, - const size_t max_dst_size, - const size_t comp_size - ) { - uint64_t time_taken = 0; - int rv = 0; - const int warm_up = 5000; - struct timespec start, end; - const int acceleration = 1; - LZ4_stream_t state; - - // Select the right function to perform the benchmark on. We perform 5000 initial loops to warm the cache and ensure that dst - // remains matching to known_good_dst between successive calls. 
- switch(function_id) { - case ID__LZ4_COMPRESS_DEFAULT: - printf("Starting benchmark for function: LZ4_compress_default()\n"); - for(int junk=0; junk 1) - iterations = atoi(argv[1]); - if (iterations < 1) - usage("Argument 1 (iterations) must be > 0."); - - // First we will create 2 sources (char *) of 2000 bytes each. One normal text, the other highly-compressible text. - const char *src = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed luctus purus et risus vulputate, et mollis orci ullamcorper. Nulla facilisi. Fusce in ligula sed purus varius aliquet interdum vitae justo. Proin quis diam velit. Nulla varius iaculis auctor. Cras volutpat, justo eu dictum pulvinar, elit sem porttitor metus, et imperdiet metus sapien et ante. Nullam nisi nulla, ornare eu tristique eu, dignissim vitae diam. Nulla sagittis porta libero, a accumsan felis sagittis scelerisque. Integer laoreet eleifend congue. Etiam rhoncus leo vel dolor fermentum, quis luctus nisl iaculis. Praesent a erat sapien. Aliquam semper mi in lorem ultrices ultricies. Lorem ipsum dolor sit amet, consectetur adipiscing elit. In feugiat risus sed enim ultrices, at sodales nulla tristique. Maecenas eget pellentesque justo, sed pellentesque lectus. Fusce sagittis sit amet elit vel varius. Donec sed ligula nec ligula vulputate rutrum sed ut lectus. Etiam congue pharetra leo vitae cursus. Morbi enim ante, porttitor ut varius vel, tincidunt quis justo. Nunc iaculis, risus id ultrices semper, metus est efficitur ligula, vel posuere risus nunc eget purus. Ut lorem turpis, condimentum at sem sed, porta aliquam turpis. In ut sapien a nulla dictum tincidunt quis sit amet lorem. Fusce at est egestas, luctus neque eu, consectetur tortor. Phasellus eleifend ultricies nulla ac lobortis. Morbi maximus quam cursus vehicula iaculis. Maecenas cursus vel justo ut rutrum. Curabitur magna orci, dignissim eget dapibus vitae, finibus id lacus. Praesent rhoncus mattis augue vitae bibendum. Praesent porta mauris non ultrices fermentum. Quisque vulputate ipsum in sodales pulvinar. Aliquam nec mollis felis. Donec vitae augue pulvinar, congue nisl sed, pretium purus. Fusce lobortis mi ac neque scelerisque semper. Pellentesque vel est vitae magna aliquet aliquet. Nam non dolor. Nulla facilisi. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. 
Morbi ac lacinia felis metus."; - const char *hc_src = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; - // Set and derive sizes. Since we're using strings, use strlen() + 1 for \0. - const size_t src_size = strlen(src) + 1; - const size_t max_dst_size = LZ4_compressBound(src_size); - int bytes_returned = 0; - // Now build allocations for the data we'll be playing with. - char *dst = calloc(1, max_dst_size); - char *known_good_dst = calloc(1, max_dst_size); - char *known_good_hc_dst = calloc(1, max_dst_size); - if (dst == NULL || known_good_dst == NULL || known_good_hc_dst == NULL) - run_screaming("Couldn't allocate memory for the destination buffers. Sad :(", 1); - - // Create known-good buffers to verify our tests with other functions will produce the same results. - bytes_returned = LZ4_compress_default(src, known_good_dst, src_size, max_dst_size); - if (bytes_returned < 1) - run_screaming("Couldn't create a known-good destination buffer for comparison... this is bad.", 1); - const size_t src_comp_size = bytes_returned; - bytes_returned = LZ4_compress_default(hc_src, known_good_hc_dst, src_size, max_dst_size); - if (bytes_returned < 1) - run_screaming("Couldn't create a known-good (highly compressible) destination buffer for comparison... this is bad.", 1); - const size_t hc_src_comp_size = bytes_returned; - - - /* LZ4_compress_default() */ - // This is the default function so we don't need to demonstrate how to use it. See basics.c if you need more basal information. 
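  /* Editor's sketch (not in the original file): the baseline call shown once
   * for completeness, mirroring how known_good_dst was produced above; every
   * name used here already exists at this point in main(). */
  memset(dst, 0, max_dst_size);
  bytes_returned = LZ4_compress_default(src, dst, src_size, max_dst_size);
  if (bytes_returned < 1)
    run_screaming("Failed to compress src using LZ4_compress_default. echo $? for return code.", bytes_returned);
  if (memcmp(dst, known_good_dst, bytes_returned) != 0)
    run_screaming("According to memcmp(), the value we got in dst from LZ4_compress_default doesn't match the known-good value. This is bad.", 1);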
- - /* LZ4_compress_fast() */ - // Using this function is identical to LZ4_compress_default except we need to specify an "acceleration" value. Defaults to 1. - memset(dst, 0, max_dst_size); - bytes_returned = LZ4_compress_fast(src, dst, src_size, max_dst_size, 1); - if (bytes_returned < 1) - run_screaming("Failed to compress src using LZ4_compress_fast. echo $? for return code.", bytes_returned); - if (memcmp(dst, known_good_dst, bytes_returned) != 0) - run_screaming("According to memcmp(), the value we got in dst from LZ4_compress_fast doesn't match the known-good value. This is bad.", 1); - - /* LZ4_compress_fast_extState() */ - // Using this function directly requires that we build an LZ4_stream_t struct ourselves. We do NOT have to reset it ourselves. - memset(dst, 0, max_dst_size); - LZ4_stream_t state; - bytes_returned = LZ4_compress_fast_extState(&state, src, dst, src_size, max_dst_size, 1); - if (bytes_returned < 1) - run_screaming("Failed to compress src using LZ4_compress_fast_extState. echo $? for return code.", bytes_returned); - if (memcmp(dst, known_good_dst, bytes_returned) != 0) - run_screaming("According to memcmp(), the value we got in dst from LZ4_compress_fast_extState doesn't match the known-good value. This is bad.", 1); - - /* LZ4_compress_generic */ - // When you can exactly control the inputs and options of your LZ4 needs, you can use LZ4_compress_generic and fixed (const) - // values for the enum types such as dictionary and limitations. Any other direct-use is probably a bad idea. - // - // That said, the LZ4_compress_generic() function is 'static inline' and does not have a prototype in lz4.h to expose a symbol - // for it. In other words: we can't access it directly. I don't want to submit a PR that modifies lz4.c/h. Yann and others can - // do that if they feel it's worth expanding this example. - // - // I will, however, leave a skeleton of what would be required to use it directly: - /* - memset(dst, 0, max_dst_size); - // LZ4_stream_t state: is already declared above. We can reuse it BUT we have to reset the stream ourselves between each call. - LZ4_resetStream((LZ4_stream_t *)&state); - // Since src size is small we know the following enums will be used: notLimited (0), byU16 (2), noDict (0), noDictIssue (0). - bytes_returned = LZ4_compress_generic(&state, src, dst, src_size, max_dst_size, notLimited, byU16, noDict, noDictIssue, 1); - if (bytes_returned < 1) - run_screaming("Failed to compress src using LZ4_compress_generic. echo $? for return code.", bytes_returned); - if (memcmp(dst, known_good_dst, bytes_returned) != 0) - run_screaming("According to memcmp(), the value we got in dst from LZ4_compress_generic doesn't match the known-good value. This is bad.", 1); - */ - - - /* Benchmarking */ - /* Now we'll run a few rudimentary benchmarks with each function to demonstrate differences in speed based on the function used. - * Remember, we cannot call LZ4_compress_generic() directly (yet) so it's disabled. 
- */ - // Suite A - Normal Compressibility - char *dst_d = calloc(1, src_size); - memset(dst, 0, max_dst_size); - printf("\nStarting suite A: Normal compressible text.\n"); - uint64_t time_taken__default = bench(known_good_dst, ID__LZ4_COMPRESS_DEFAULT, iterations, src, dst, src_size, max_dst_size, src_comp_size); - uint64_t time_taken__fast = bench(known_good_dst, ID__LZ4_COMPRESS_FAST, iterations, src, dst, src_size, max_dst_size, src_comp_size); - uint64_t time_taken__fast_extstate = bench(known_good_dst, ID__LZ4_COMPRESS_FAST_EXTSTATE, iterations, src, dst, src_size, max_dst_size, src_comp_size); - //uint64_t time_taken__generic = bench(known_good_dst, ID__LZ4_COMPRESS_GENERIC, iterations, src, dst, src_size, max_dst_size, src_comp_size); - uint64_t time_taken__decomp_safe = bench(src, ID__LZ4_DECOMPRESS_SAFE, iterations, known_good_dst, dst_d, src_size, max_dst_size, src_comp_size); - uint64_t time_taken__decomp_fast = bench(src, ID__LZ4_DECOMPRESS_FAST, iterations, known_good_dst, dst_d, src_size, max_dst_size, src_comp_size); - // Suite B - Highly Compressible - memset(dst, 0, max_dst_size); - printf("\nStarting suite B: Highly compressible text.\n"); - uint64_t time_taken_hc__default = bench(known_good_hc_dst, ID__LZ4_COMPRESS_DEFAULT, iterations, hc_src, dst, src_size, max_dst_size, hc_src_comp_size); - uint64_t time_taken_hc__fast = bench(known_good_hc_dst, ID__LZ4_COMPRESS_FAST, iterations, hc_src, dst, src_size, max_dst_size, hc_src_comp_size); - uint64_t time_taken_hc__fast_extstate = bench(known_good_hc_dst, ID__LZ4_COMPRESS_FAST_EXTSTATE, iterations, hc_src, dst, src_size, max_dst_size, hc_src_comp_size); - //uint64_t time_taken_hc__generic = bench(known_good_hc_dst, ID__LZ4_COMPRESS_GENERIC, iterations, hc_src, dst, src_size, max_dst_size, hc_src_comp_size); - uint64_t time_taken_hc__decomp_safe = bench(hc_src, ID__LZ4_DECOMPRESS_SAFE, iterations, known_good_hc_dst, dst_d, src_size, max_dst_size, hc_src_comp_size); - uint64_t time_taken_hc__decomp_fast = bench(hc_src, ID__LZ4_DECOMPRESS_FAST, iterations, known_good_hc_dst, dst_d, src_size, max_dst_size, hc_src_comp_size); - - // Report and leave. 
- setlocale(LC_ALL, ""); - const char *format = "|%-14s|%-30s|%'14.9f|%'16d|%'14d|%'13.2f%%|\n"; - const char *header_format = "|%-14s|%-30s|%14s|%16s|%14s|%14s|\n"; - const char *separator = "+--------------+------------------------------+--------------+----------------+--------------+--------------+\n"; - printf("\n"); - printf("%s", separator); - printf(header_format, "Source", "Function Benchmarked", "Total Seconds", "Iterations/sec", "ns/Iteration", "% of default"); - printf("%s", separator); - printf(format, "Normal Text", "LZ4_compress_default()", (double)time_taken__default / BILLION, (int)(iterations / ((double)time_taken__default /BILLION)), (int)time_taken__default / iterations, (double)time_taken__default * 100 / time_taken__default); - printf(format, "Normal Text", "LZ4_compress_fast()", (double)time_taken__fast / BILLION, (int)(iterations / ((double)time_taken__fast /BILLION)), (int)time_taken__fast / iterations, (double)time_taken__fast * 100 / time_taken__default); - printf(format, "Normal Text", "LZ4_compress_fast_extState()", (double)time_taken__fast_extstate / BILLION, (int)(iterations / ((double)time_taken__fast_extstate /BILLION)), (int)time_taken__fast_extstate / iterations, (double)time_taken__fast_extstate * 100 / time_taken__default); - //printf(format, "Normal Text", "LZ4_compress_generic()", (double)time_taken__generic / BILLION, (int)(iterations / ((double)time_taken__generic /BILLION)), (int)time_taken__generic / iterations, (double)time_taken__generic * 100 / time_taken__default); - printf(format, "Normal Text", "LZ4_decompress_safe()", (double)time_taken__decomp_safe / BILLION, (int)(iterations / ((double)time_taken__decomp_safe /BILLION)), (int)time_taken__decomp_safe / iterations, (double)time_taken__decomp_safe * 100 / time_taken__default); - printf(format, "Normal Text", "LZ4_decompress_fast()", (double)time_taken__decomp_fast / BILLION, (int)(iterations / ((double)time_taken__decomp_fast /BILLION)), (int)time_taken__decomp_fast / iterations, (double)time_taken__decomp_fast * 100 / time_taken__default); - printf(header_format, "", "", "", "", "", ""); - printf(format, "Compressible", "LZ4_compress_default()", (double)time_taken_hc__default / BILLION, (int)(iterations / ((double)time_taken_hc__default /BILLION)), (int)time_taken_hc__default / iterations, (double)time_taken_hc__default * 100 / time_taken_hc__default); - printf(format, "Compressible", "LZ4_compress_fast()", (double)time_taken_hc__fast / BILLION, (int)(iterations / ((double)time_taken_hc__fast /BILLION)), (int)time_taken_hc__fast / iterations, (double)time_taken_hc__fast * 100 / time_taken_hc__default); - printf(format, "Compressible", "LZ4_compress_fast_extState()", (double)time_taken_hc__fast_extstate / BILLION, (int)(iterations / ((double)time_taken_hc__fast_extstate /BILLION)), (int)time_taken_hc__fast_extstate / iterations, (double)time_taken_hc__fast_extstate * 100 / time_taken_hc__default); - //printf(format, "Compressible", "LZ4_compress_generic()", (double)time_taken_hc__generic / BILLION, (int)(iterations / ((double)time_taken_hc__generic /BILLION)), (int)time_taken_hc__generic / iterations, (double)time_taken_hc__generic * 100 / time_taken_hc__default); - printf(format, "Compressible", "LZ4_decompress_safe()", (double)time_taken_hc__decomp_safe / BILLION, (int)(iterations / ((double)time_taken_hc__decomp_safe /BILLION)), (int)time_taken_hc__decomp_safe / iterations, (double)time_taken_hc__decomp_safe * 100 / time_taken_hc__default); - printf(format, "Compressible", 
"LZ4_decompress_fast()", (double)time_taken_hc__decomp_fast / BILLION, (int)(iterations / ((double)time_taken_hc__decomp_fast /BILLION)), (int)time_taken_hc__decomp_fast / iterations, (double)time_taken_hc__decomp_fast * 100 / time_taken_hc__default); - printf("%s", separator); - printf("\n"); - printf("All done, ran %d iterations per test.\n", iterations); - return 0; -} diff --git a/librocksdb-sys/lz4/examples/dictionaryRandomAccess.c b/librocksdb-sys/lz4/examples/dictionaryRandomAccess.c deleted file mode 100644 index 3aa4609..0000000 --- a/librocksdb-sys/lz4/examples/dictionaryRandomAccess.c +++ /dev/null @@ -1,280 +0,0 @@ -// LZ4 API example : Dictionary Random Access - -#if defined(_MSC_VER) && (_MSC_VER <= 1800) /* Visual Studio <= 2013 */ -# define _CRT_SECURE_NO_WARNINGS -# define snprintf sprintf_s -#endif -#include "lz4.h" - -#include -#include -#include -#include - -#define MIN(x, y) ((x) < (y) ? (x) : (y)) - -enum { - BLOCK_BYTES = 1024, /* 1 KiB of uncompressed data in a block */ - DICTIONARY_BYTES = 1024, /* Load a 1 KiB dictionary */ - MAX_BLOCKS = 1024 /* For simplicity of implementation */ -}; - -/** - * Magic bytes for this test case. - * This is not a great magic number because it is a common word in ASCII. - * However, it is important to have some versioning system in your format. - */ -const char kTestMagic[] = { 'T', 'E', 'S', 'T' }; - - -void write_int(FILE* fp, int i) { - size_t written = fwrite(&i, sizeof(i), 1, fp); - if (written != 1) { exit(10); } -} - -void write_bin(FILE* fp, const void* array, size_t arrayBytes) { - size_t written = fwrite(array, 1, arrayBytes, fp); - if (written != arrayBytes) { exit(11); } -} - -void read_int(FILE* fp, int* i) { - size_t read = fread(i, sizeof(*i), 1, fp); - if (read != 1) { exit(12); } -} - -size_t read_bin(FILE* fp, void* array, size_t arrayBytes) { - size_t read = fread(array, 1, arrayBytes, fp); - if (ferror(fp)) { exit(12); } - return read; -} - -void seek_bin(FILE* fp, long offset, int origin) { - if (fseek(fp, offset, origin)) { exit(14); } -} - - -void test_compress(FILE* outFp, FILE* inpFp, void *dict, int dictSize) -{ - LZ4_stream_t lz4Stream_body; - LZ4_stream_t* lz4Stream = &lz4Stream_body; - - char inpBuf[BLOCK_BYTES]; - int offsets[MAX_BLOCKS]; - int *offsetsEnd = offsets; - - - LZ4_initStream(lz4Stream, sizeof(*lz4Stream)); - - /* Write header magic */ - write_bin(outFp, kTestMagic, sizeof(kTestMagic)); - - *offsetsEnd++ = sizeof(kTestMagic); - /* Write compressed data blocks. Each block contains BLOCK_BYTES of plain - data except possibly the last. 
*/ - for(;;) { - const int inpBytes = (int) read_bin(inpFp, inpBuf, BLOCK_BYTES); - if(0 == inpBytes) { - break; - } - - /* Forget previously compressed data and load the dictionary */ - LZ4_loadDict(lz4Stream, (const char*) dict, dictSize); - { - char cmpBuf[LZ4_COMPRESSBOUND(BLOCK_BYTES)]; - const int cmpBytes = LZ4_compress_fast_continue( - lz4Stream, inpBuf, cmpBuf, inpBytes, sizeof(cmpBuf), 1); - if(cmpBytes <= 0) { exit(1); } - write_bin(outFp, cmpBuf, (size_t)cmpBytes); - /* Keep track of the offsets */ - *offsetsEnd = *(offsetsEnd - 1) + cmpBytes; - ++offsetsEnd; - } - if (offsetsEnd - offsets > MAX_BLOCKS) { exit(2); } - } - /* Write the tailing jump table */ - { - int *ptr = offsets; - while (ptr != offsetsEnd) { - write_int(outFp, *ptr++); - } - write_int(outFp, (int) (offsetsEnd - offsets)); - } -} - - -void test_decompress(FILE* outFp, FILE* inpFp, void *dict, int dictSize, int offset, int length) -{ - LZ4_streamDecode_t lz4StreamDecode_body; - LZ4_streamDecode_t* lz4StreamDecode = &lz4StreamDecode_body; - - /* The blocks [currentBlock, endBlock) contain the data we want */ - int currentBlock = offset / BLOCK_BYTES; - int endBlock = ((offset + length - 1) / BLOCK_BYTES) + 1; - - char decBuf[BLOCK_BYTES]; - int offsets[MAX_BLOCKS]; - - /* Special cases */ - if (length == 0) { return; } - - /* Read the magic bytes */ - { - char magic[sizeof(kTestMagic)]; - size_t read = read_bin(inpFp, magic, sizeof(magic)); - if (read != sizeof(magic)) { exit(1); } - if (memcmp(kTestMagic, magic, sizeof(magic))) { exit(2); } - } - - /* Read the offsets tail */ - { - int numOffsets; - int block; - int *offsetsPtr = offsets; - seek_bin(inpFp, -4, SEEK_END); - read_int(inpFp, &numOffsets); - if (numOffsets <= endBlock) { exit(3); } - seek_bin(inpFp, -4 * (numOffsets + 1), SEEK_END); - for (block = 0; block <= endBlock; ++block) { - read_int(inpFp, offsetsPtr++); - } - } - /* Seek to the first block to read */ - seek_bin(inpFp, offsets[currentBlock], SEEK_SET); - offset = offset % BLOCK_BYTES; - - /* Start decoding */ - for(; currentBlock < endBlock; ++currentBlock) { - char cmpBuf[LZ4_COMPRESSBOUND(BLOCK_BYTES)]; - /* The difference in offsets is the size of the block */ - int cmpBytes = offsets[currentBlock + 1] - offsets[currentBlock]; - { - const size_t read = read_bin(inpFp, cmpBuf, (size_t)cmpBytes); - if(read != (size_t)cmpBytes) { exit(4); } - } - - /* Load the dictionary */ - LZ4_setStreamDecode(lz4StreamDecode, (const char*) dict, dictSize); - { - const int decBytes = LZ4_decompress_safe_continue( - lz4StreamDecode, cmpBuf, decBuf, cmpBytes, BLOCK_BYTES); - if(decBytes <= 0) { exit(5); } - { - /* Write out the part of the data we care about */ - int blockLength = MIN(length, (decBytes - offset)); - write_bin(outFp, decBuf + offset, (size_t)blockLength); - offset = 0; - length -= blockLength; - } - } - } -} - - -int compare(FILE* fp0, FILE* fp1, int length) -{ - int result = 0; - - while(0 == result) { - char b0[4096]; - char b1[4096]; - const size_t r0 = read_bin(fp0, b0, MIN(length, (int)sizeof(b0))); - const size_t r1 = read_bin(fp1, b1, MIN(length, (int)sizeof(b1))); - - result = (int) r0 - (int) r1; - - if(0 == r0 || 0 == r1) { - break; - } - if(0 == result) { - result = memcmp(b0, b1, r0); - } - length -= r0; - } - - return result; -} - - -int main(int argc, char* argv[]) -{ - char inpFilename[256] = { 0 }; - char lz4Filename[256] = { 0 }; - char decFilename[256] = { 0 }; - char dictFilename[256] = { 0 }; - int offset; - int length; - char dict[DICTIONARY_BYTES]; - int dictSize; - - 
if(argc < 5) { - printf("Usage: %s input dictionary offset length", argv[0]); - return 0; - } - - snprintf(inpFilename, 256, "%s", argv[1]); - snprintf(lz4Filename, 256, "%s.lz4s-%d", argv[1], BLOCK_BYTES); - snprintf(decFilename, 256, "%s.lz4s-%d.dec", argv[1], BLOCK_BYTES); - snprintf(dictFilename, 256, "%s", argv[2]); - offset = atoi(argv[3]); - length = atoi(argv[4]); - - printf("inp = [%s]\n", inpFilename); - printf("lz4 = [%s]\n", lz4Filename); - printf("dec = [%s]\n", decFilename); - printf("dict = [%s]\n", dictFilename); - printf("offset = [%d]\n", offset); - printf("length = [%d]\n", length); - - /* Load dictionary */ - { - FILE* dictFp = fopen(dictFilename, "rb"); - dictSize = (int)read_bin(dictFp, dict, DICTIONARY_BYTES); - fclose(dictFp); - } - - /* compress */ - { - FILE* inpFp = fopen(inpFilename, "rb"); - FILE* outFp = fopen(lz4Filename, "wb"); - - printf("compress : %s -> %s\n", inpFilename, lz4Filename); - test_compress(outFp, inpFp, dict, dictSize); - printf("compress : done\n"); - - fclose(outFp); - fclose(inpFp); - } - - /* decompress */ - { - FILE* inpFp = fopen(lz4Filename, "rb"); - FILE* outFp = fopen(decFilename, "wb"); - - printf("decompress : %s -> %s\n", lz4Filename, decFilename); - test_decompress(outFp, inpFp, dict, DICTIONARY_BYTES, offset, length); - printf("decompress : done\n"); - - fclose(outFp); - fclose(inpFp); - } - - /* verify */ - { - FILE* inpFp = fopen(inpFilename, "rb"); - FILE* decFp = fopen(decFilename, "rb"); - seek_bin(inpFp, offset, SEEK_SET); - - printf("verify : %s <-> %s\n", inpFilename, decFilename); - const int cmp = compare(inpFp, decFp, length); - if(0 == cmp) { - printf("verify : OK\n"); - } else { - printf("verify : NG\n"); - } - - fclose(decFp); - fclose(inpFp); - } - - return 0; -} diff --git a/librocksdb-sys/lz4/examples/dictionaryRandomAccess.md b/librocksdb-sys/lz4/examples/dictionaryRandomAccess.md deleted file mode 100644 index c6f4388..0000000 --- a/librocksdb-sys/lz4/examples/dictionaryRandomAccess.md +++ /dev/null @@ -1,67 +0,0 @@ -# LZ4 API Example : Dictionary Random Access - -`dictionaryRandomAccess.c` is LZ4 API example which implements dictionary compression and random access decompression. - -Please note that the output file is not compatible with lz4frame and is platform dependent. - - -## What's the point of this example ? - - - Dictionary based compression for homogeneous files. - - Random access to compressed blocks. - - -## How the compression works - -Reads the dictionary from a file, and uses it as the history for each block. -This allows each block to be independent, but maintains compression ratio. - -``` - Dictionary - + - | - v - +---------+ - | Block#1 | - +----+----+ - | - v - {Out#1} - - - Dictionary - + - | - v - +---------+ - | Block#2 | - +----+----+ - | - v - {Out#2} -``` - -After writing the magic bytes `TEST` and then the compressed blocks, write out the jump table. -The last 4 bytes is an integer containing the number of blocks in the stream. -If there are `N` blocks, then just before the last 4 bytes is `N + 1` 4 byte integers containing the offsets at the beginning and end of each block. -Let `Offset#K` be the total number of bytes written after writing out `Block#K` *including* the magic bytes for simplicity. - -``` -+------+---------+ +---------+---+----------+ +----------+-----+ -| TEST | Block#1 | ... | Block#N | 4 | Offset#1 | ... | Offset#N | N+1 | -+------+---------+ +---------+---+----------+ +----------+-----+ -``` - -## How the decompression works - -Decompression will do reverse order. 
- - - Seek to the last 4 bytes of the file and read the number of offsets. - - Read each offset into an array. - - Seek to the first block containing data we want to read. - We know where to look because we know each block contains a fixed amount of uncompressed data, except possibly the last. - - Decompress it and write what data we need from it to the file. - - Read the next block. - - Decompress it and write that page to the file. - -Continue these procedures until all the required data has been read. diff --git a/librocksdb-sys/lz4/examples/fileCompress.c b/librocksdb-sys/lz4/examples/fileCompress.c deleted file mode 100644 index 4486ea8..0000000 --- a/librocksdb-sys/lz4/examples/fileCompress.c +++ /dev/null @@ -1,232 +0,0 @@ -/* LZ4file API example : compress a file - * Modified from an example code by anjiahao - * - * This example will demonstrate how - * to manipulate lz4 compressed files like - * normal files */ - -#include -#include -#include -#include -#include - -#include - - -#define CHUNK_SIZE (16*1024) - -static size_t get_file_size(char *filename) -{ - struct stat statbuf; - - if (filename == NULL) { - return 0; - } - - if(stat(filename,&statbuf)) { - return 0; - } - - return statbuf.st_size; -} - -static int compress_file(FILE* f_in, FILE* f_out) -{ - assert(f_in != NULL); assert(f_out != NULL); - - LZ4F_errorCode_t ret = LZ4F_OK_NoError; - size_t len; - LZ4_writeFile_t* lz4fWrite; - void* const buf = malloc(CHUNK_SIZE); - if (!buf) { - printf("error: memory allocation failed \n"); - } - - /* Of course, you can also use prefsPtr to - * set the parameters of the compressed file - * NULL is use default - */ - ret = LZ4F_writeOpen(&lz4fWrite, f_out, NULL); - if (LZ4F_isError(ret)) { - printf("LZ4F_writeOpen error: %s\n", LZ4F_getErrorName(ret)); - free(buf); - return 1; - } - - while (1) { - len = fread(buf, 1, CHUNK_SIZE, f_in); - - if (ferror(f_in)) { - printf("fread error\n"); - goto out; - } - - /* nothing to read */ - if (len == 0) { - break; - } - - ret = LZ4F_write(lz4fWrite, buf, len); - if (LZ4F_isError(ret)) { - printf("LZ4F_write: %s\n", LZ4F_getErrorName(ret)); - goto out; - } - } - -out: - free(buf); - if (LZ4F_isError(LZ4F_writeClose(lz4fWrite))) { - printf("LZ4F_writeClose: %s\n", LZ4F_getErrorName(ret)); - return 1; - } - - return 0; -} - -static int decompress_file(FILE* f_in, FILE* f_out) -{ - assert(f_in != NULL); assert(f_out != NULL); - - LZ4F_errorCode_t ret = LZ4F_OK_NoError; - LZ4_readFile_t* lz4fRead; - void* const buf= malloc(CHUNK_SIZE); - if (!buf) { - printf("error: memory allocation failed \n"); - } - - ret = LZ4F_readOpen(&lz4fRead, f_in); - if (LZ4F_isError(ret)) { - printf("LZ4F_readOpen error: %s\n", LZ4F_getErrorName(ret)); - free(buf); - return 1; - } - - while (1) { - ret = LZ4F_read(lz4fRead, buf, CHUNK_SIZE); - if (LZ4F_isError(ret)) { - printf("LZ4F_read error: %s\n", LZ4F_getErrorName(ret)); - goto out; - } - - /* nothing to read */ - if (ret == 0) { - break; - } - - if(fwrite(buf, 1, ret, f_out) != ret) { - printf("write error!\n"); - goto out; - } - } - -out: - free(buf); - if (LZ4F_isError(LZ4F_readClose(lz4fRead))) { - printf("LZ4F_readClose: %s\n", LZ4F_getErrorName(ret)); - return 1; - } - - if (ret) { - return 1; - } - - return 0; -} - -int compareFiles(FILE* fp0, FILE* fp1) -{ - int result = 0; - - while (result==0) { - char b0[1024]; - char b1[1024]; - size_t const r0 = fread(b0, 1, sizeof(b0), fp0); - size_t const r1 = fread(b1, 1, sizeof(b1), fp1); - - result = (r0 != r1); - if (!r0 || !r1) break; - if (!result) result = 
memcmp(b0, b1, r0); - } - - return result; -} - -int main(int argc, const char **argv) { - char inpFilename[256] = { 0 }; - char lz4Filename[256] = { 0 }; - char decFilename[256] = { 0 }; - - if (argc < 2) { - printf("Please specify input filename\n"); - return 0; - } - - snprintf(inpFilename, 256, "%s", argv[1]); - snprintf(lz4Filename, 256, "%s.lz4", argv[1]); - snprintf(decFilename, 256, "%s.lz4.dec", argv[1]); - - printf("inp = [%s]\n", inpFilename); - printf("lz4 = [%s]\n", lz4Filename); - printf("dec = [%s]\n", decFilename); - - /* compress */ - { FILE* const inpFp = fopen(inpFilename, "rb"); - FILE* const outFp = fopen(lz4Filename, "wb"); - printf("compress : %s -> %s\n", inpFilename, lz4Filename); - LZ4F_errorCode_t ret = compress_file(inpFp, outFp); - fclose(inpFp); - fclose(outFp); - - if (ret) { - printf("compression error: %s\n", LZ4F_getErrorName(ret)); - return 1; - } - - printf("%s: %zu → %zu bytes, %.1f%%\n", - inpFilename, - get_file_size(inpFilename), - get_file_size(lz4Filename), /* might overflow is size_t is 32 bits and size_{in,out} > 4 GB */ - (double)get_file_size(lz4Filename) / get_file_size(inpFilename) * 100); - - printf("compress : done\n"); - } - - /* decompress */ - { - FILE* const inpFp = fopen(lz4Filename, "rb"); - FILE* const outFp = fopen(decFilename, "wb"); - - printf("decompress : %s -> %s\n", lz4Filename, decFilename); - LZ4F_errorCode_t ret = decompress_file(inpFp, outFp); - - fclose(outFp); - fclose(inpFp); - - if (ret) { - printf("compression error: %s\n", LZ4F_getErrorName(ret)); - return 1; - } - - printf("decompress : done\n"); - } - - /* verify */ - { FILE* const inpFp = fopen(inpFilename, "rb"); - FILE* const decFp = fopen(decFilename, "rb"); - - printf("verify : %s <-> %s\n", inpFilename, decFilename); - int const cmp = compareFiles(inpFp, decFp); - - fclose(decFp); - fclose(inpFp); - - if (cmp) { - printf("corruption detected : decompressed file differs from original\n"); - return cmp; - } - - printf("verify : OK\n"); - } - -} diff --git a/librocksdb-sys/lz4/examples/frameCompress.c b/librocksdb-sys/lz4/examples/frameCompress.c deleted file mode 100644 index 25ff729..0000000 --- a/librocksdb-sys/lz4/examples/frameCompress.c +++ /dev/null @@ -1,489 +0,0 @@ -/* LZ4frame API example : compress a file - * Modified from an example code by Zbigniew Jędrzejewski-Szmek - * - * This example streams an input file into an output file - * using a bounded memory budget. 
- * Input is read in chunks of IN_CHUNK_SIZE */ - -#include -#include -#include -#include -#include - -#include -#include -#include - -#define IN_CHUNK_SIZE (16*1024) - -static const LZ4F_preferences_t kPrefs = { - { LZ4F_max256KB, LZ4F_blockLinked, LZ4F_noContentChecksum, LZ4F_frame, - 0 /* unknown content size */, 0 /* no dictID */ , LZ4F_noBlockChecksum }, - 0, /* compression level; 0 == default */ - 0, /* autoflush */ - 0, /* favor decompression speed */ - { 0, 0, 0 }, /* reserved, must be set to 0 */ -}; - - -/* safe_fwrite() : - * performs fwrite(), ensure operation success, or immediately exit() */ -static void safe_fwrite(void* buf, size_t eltSize, size_t nbElt, FILE* f) -{ - size_t const writtenSize = fwrite(buf, eltSize, nbElt, f); - size_t const expectedSize = eltSize * nbElt; - if (nbElt>0) assert(expectedSize / nbElt == eltSize); /* check overflow */ - if (writtenSize < expectedSize) { - if (ferror(f)) /* note : ferror() must follow fwrite */ - fprintf(stderr, "Write failed \n"); - else - fprintf(stderr, "Write too short \n"); - exit(1); - } -} - - -/* ================================================= */ -/* Streaming Compression example */ -/* ================================================= */ - -typedef struct { - int error; - unsigned long long size_in; - unsigned long long size_out; -} compressResult_t; - -static compressResult_t -compress_file_internal(FILE* f_in, FILE* f_out, - LZ4F_compressionContext_t ctx, - void* inBuff, size_t inChunkSize, - void* outBuff, size_t outCapacity, - FILE* f_unc, long uncOffset) -{ - compressResult_t result = { 1, 0, 0 }; /* result for an error */ - long long count_in = 0, count_out, bytesToOffset = -1; - - assert(f_in != NULL); assert(f_out != NULL); - assert(ctx != NULL); - assert(outCapacity >= LZ4F_HEADER_SIZE_MAX); - assert(outCapacity >= LZ4F_compressBound(inChunkSize, &kPrefs)); - - /* write frame header */ - { size_t const headerSize = LZ4F_compressBegin(ctx, outBuff, outCapacity, &kPrefs); - if (LZ4F_isError(headerSize)) { - printf("Failed to start compression: error %u \n", (unsigned)headerSize); - return result; - } - count_out = headerSize; - printf("Buffer size is %u bytes, header size %u bytes \n", - (unsigned)outCapacity, (unsigned)headerSize); - safe_fwrite(outBuff, 1, headerSize, f_out); - } - - /* stream file */ - for (;;) { - size_t compressedSize; - long long inSize = IN_CHUNK_SIZE; - if (uncOffset >= 0) { - bytesToOffset = uncOffset - count_in; - - /* read only remaining bytes to offset position */ - if (bytesToOffset < IN_CHUNK_SIZE && bytesToOffset > 0) { - inSize = bytesToOffset; - } - } - - /* input data is at uncompressed data offset */ - if (bytesToOffset <= 0 && uncOffset >= 0 && f_unc) { - size_t const readSize = fread(inBuff, 1, inSize, f_unc); - if (readSize == 0) { - uncOffset = -1; - continue; - } - count_in += readSize; - compressedSize = LZ4F_uncompressedUpdate(ctx, - outBuff, outCapacity, - inBuff, readSize, - NULL); - } else { - size_t const readSize = fread(inBuff, 1, inSize, f_in); - if (readSize == 0) break; /* nothing left to read from input file */ - count_in += readSize; - compressedSize = LZ4F_compressUpdate(ctx, - outBuff, outCapacity, - inBuff, readSize, - NULL); - - } - - if (LZ4F_isError(compressedSize)) { - printf("Compression failed: error %u \n", (unsigned)compressedSize); - return result; - } - - printf("Writing %u bytes\n", (unsigned)compressedSize); - safe_fwrite(outBuff, 1, compressedSize, f_out); - count_out += compressedSize; - } - - /* flush whatever remains within internal 
buffers */ - { size_t const compressedSize = LZ4F_compressEnd(ctx, - outBuff, outCapacity, - NULL); - if (LZ4F_isError(compressedSize)) { - printf("Failed to end compression: error %u \n", (unsigned)compressedSize); - return result; - } - - printf("Writing %u bytes \n", (unsigned)compressedSize); - safe_fwrite(outBuff, 1, compressedSize, f_out); - count_out += compressedSize; - } - - result.size_in = count_in; - result.size_out = count_out; - result.error = 0; - return result; -} - -static compressResult_t -compress_file(FILE* f_in, FILE* f_out, - FILE* f_unc, int uncOffset) -{ - assert(f_in != NULL); - assert(f_out != NULL); - - /* resource allocation */ - LZ4F_compressionContext_t ctx; - size_t const ctxCreation = LZ4F_createCompressionContext(&ctx, LZ4F_VERSION); - void* const src = malloc(IN_CHUNK_SIZE); - size_t const outbufCapacity = LZ4F_compressBound(IN_CHUNK_SIZE, &kPrefs); /* large enough for any input <= IN_CHUNK_SIZE */ - void* const outbuff = malloc(outbufCapacity); - - compressResult_t result = { 1, 0, 0 }; /* == error (default) */ - if (!LZ4F_isError(ctxCreation) && src && outbuff) { - result = compress_file_internal(f_in, f_out, - ctx, - src, IN_CHUNK_SIZE, - outbuff, outbufCapacity, - f_unc, uncOffset); - } else { - printf("error : resource allocation failed \n"); - } - - LZ4F_freeCompressionContext(ctx); /* supports free on NULL */ - free(src); - free(outbuff); - return result; -} - - -/* ================================================= */ -/* Streaming decompression example */ -/* ================================================= */ - -static size_t get_block_size(const LZ4F_frameInfo_t* info) { - switch (info->blockSizeID) { - case LZ4F_default: - case LZ4F_max64KB: return 1 << 16; - case LZ4F_max256KB: return 1 << 18; - case LZ4F_max1MB: return 1 << 20; - case LZ4F_max4MB: return 1 << 22; - default: - printf("Impossible with expected frame specification (<=v1.6.1)\n"); - exit(1); - } -} - -/* @return : 1==error, 0==success */ -static int -decompress_file_internal(FILE* f_in, FILE* f_out, - LZ4F_dctx* dctx, - void* src, size_t srcCapacity, size_t filled, size_t alreadyConsumed, - void* dst, size_t dstCapacity) -{ - int firstChunk = 1; - size_t ret = 1; - - assert(f_in != NULL); assert(f_out != NULL); - assert(dctx != NULL); - assert(src != NULL); assert(srcCapacity > 0); assert(filled <= srcCapacity); assert(alreadyConsumed <= filled); - assert(dst != NULL); assert(dstCapacity > 0); - - /* Decompression */ - while (ret != 0) { - /* Load more input */ - size_t readSize = firstChunk ? 
filled : fread(src, 1, srcCapacity, f_in); firstChunk=0; - const void* srcPtr = (const char*)src + alreadyConsumed; alreadyConsumed=0; - const void* const srcEnd = (const char*)srcPtr + readSize; - if (readSize == 0 || ferror(f_in)) { - printf("Decompress: not enough input or error reading file\n"); - return 1; - } - - /* Decompress: - * Continue while there is more input to read (srcPtr != srcEnd) - * and the frame isn't over (ret != 0) - */ - while (srcPtr < srcEnd && ret != 0) { - /* Any data within dst has been flushed at this stage */ - size_t dstSize = dstCapacity; - size_t srcSize = (const char*)srcEnd - (const char*)srcPtr; - ret = LZ4F_decompress(dctx, dst, &dstSize, srcPtr, &srcSize, /* LZ4F_decompressOptions_t */ NULL); - if (LZ4F_isError(ret)) { - printf("Decompression error: %s\n", LZ4F_getErrorName(ret)); - return 1; - } - /* Flush output */ - if (dstSize != 0) safe_fwrite(dst, 1, dstSize, f_out); - /* Update input */ - srcPtr = (const char*)srcPtr + srcSize; - } - - assert(srcPtr <= srcEnd); - - /* Ensure all input data has been consumed. - * It is valid to have multiple frames in the same file, - * but this example only supports one frame. - */ - if (srcPtr < srcEnd) { - printf("Decompress: Trailing data left in file after frame\n"); - return 1; - } - } - - /* Check that there isn't trailing data in the file after the frame. - * It is valid to have multiple frames in the same file, - * but this example only supports one frame. - */ - { size_t const readSize = fread(src, 1, 1, f_in); - if (readSize != 0 || !feof(f_in)) { - printf("Decompress: Trailing data left in file after frame\n"); - return 1; - } } - - return 0; -} - - -/* @return : 1==error, 0==completed */ -static int -decompress_file_allocDst(FILE* f_in, FILE* f_out, - LZ4F_dctx* dctx, - void* src, size_t srcCapacity) -{ - assert(f_in != NULL); assert(f_out != NULL); - assert(dctx != NULL); - assert(src != NULL); - assert(srcCapacity >= LZ4F_HEADER_SIZE_MAX); /* ensure LZ4F_getFrameInfo() can read enough data */ - - /* Read Frame header */ - size_t const readSize = fread(src, 1, srcCapacity, f_in); - if (readSize == 0 || ferror(f_in)) { - printf("Decompress: not enough input or error reading file\n"); - return 1; - } - - LZ4F_frameInfo_t info; - size_t consumedSize = readSize; - { size_t const fires = LZ4F_getFrameInfo(dctx, &info, src, &consumedSize); - if (LZ4F_isError(fires)) { - printf("LZ4F_getFrameInfo error: %s\n", LZ4F_getErrorName(fires)); - return 1; - } } - - /* Allocating enough space for an entire block isn't necessary for - * correctness, but it allows some memcpy's to be elided. - */ - size_t const dstCapacity = get_block_size(&info); - void* const dst = malloc(dstCapacity); - if (!dst) { perror("decompress_file(dst)"); return 1; } - - int const decompressionResult = decompress_file_internal( - f_in, f_out, - dctx, - src, srcCapacity, readSize-consumedSize, consumedSize, - dst, dstCapacity); - - free(dst); - return decompressionResult; -} - - -/* @result : 1==error, 0==success */ -static int decompress_file(FILE* f_in, FILE* f_out) -{ - assert(f_in != NULL); assert(f_out != NULL); - - /* Resource allocation */ - void* const src = malloc(IN_CHUNK_SIZE); - if (!src) { perror("decompress_file(src)"); return 1; } - - LZ4F_dctx* dctx; - { size_t const dctxStatus = LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION); - if (LZ4F_isError(dctxStatus)) { - printf("LZ4F_dctx creation error: %s\n", LZ4F_getErrorName(dctxStatus)); - } } - - int const result = !dctx ? 
1 /* error */ : - decompress_file_allocDst(f_in, f_out, dctx, src, IN_CHUNK_SIZE); - - free(src); - LZ4F_freeDecompressionContext(dctx); /* note : free works on NULL */ - return result; -} - - -int compareFiles(FILE* fp0, FILE* fp1, FILE* fpUnc, long uncOffset) -{ - int result = 0; - long bytesRead = 0; - long bytesToOffset = -1; - long b1Size = 1024; - - while (result==0) { - char b1[b1Size]; - size_t r1; - size_t bytesToRead = sizeof b1; - if (uncOffset >= 0) { - bytesToOffset = uncOffset - bytesRead; - - /* read remainder to offset */ - if (bytesToOffset < b1Size) { - bytesToRead = bytesToOffset; - } - } - - char b0[1024]; - size_t r0; - if (bytesToOffset <= 0 && fpUnc) { - bytesToRead = sizeof b1; - r0 = fread(b0, 1,bytesToRead, fpUnc); - } else { - r0 = fread(b0, 1, bytesToRead, fp0); - } - - r1 = fread(b1, 1, r0, fp1); - - result = (r0 != r1); - if (!r0 || !r1) break; - if (!result) result = memcmp(b0, b1, r0); - - bytesRead += r1; - } - - return result; -} - - -int main(int argc, char **argv) { - char inpFilename[256] = { 0 }; - char lz4Filename[256] = { 0 }; - char decFilename[256] = { 0 }; - - int uncOffset = -1; - char uncFilename[256] = { 0 }; - int opt; - - if (argc < 2) { - printf("Please specify input filename\n"); - return EXIT_FAILURE; - } - - snprintf(inpFilename, 256, "%s", argv[1]); - snprintf(lz4Filename, 256, "%s.lz4", argv[1]); - snprintf(decFilename, 256, "%s.lz4.dec", argv[1]); - - while ((opt = getopt(argc, argv, "o:d:")) != -1) { - switch (opt) { - case 'd': - snprintf(uncFilename, 256, "%s", optarg); - break; - case 'o': - uncOffset = atoi(optarg); - break; - default: - printf("usage: %s [-o -d ]\n", argv[0]); - printf("-o uncompressed data offset\n"); - printf(" inject uncompressed data at this offset into the lz4 file\n"); - printf("-d uncompressed file\n"); - printf(" file to inject without compression into the lz4 file\n"); - return EXIT_FAILURE; - } - } - - printf("inp = [%s]\n", inpFilename); - printf("lz4 = [%s]\n", lz4Filename); - printf("dec = [%s]\n", decFilename); - if (uncOffset > 0) { - printf("unc = [%s]\n", uncFilename); - printf("ofs = [%i]\n", uncOffset); - } - - /* compress */ - { FILE* const inpFp = fopen(inpFilename, "rb"); - FILE* const outFp = fopen(lz4Filename, "wb"); - FILE* const uncFp = fopen(uncFilename, "rb"); - - printf("compress : %s -> %s\n", inpFilename, lz4Filename); - compressResult_t const ret = compress_file( - inpFp, outFp, - uncFp, uncOffset); - - fclose(outFp); - fclose(inpFp); - if (uncFp) - fclose(uncFp); - - if (ret.error) { - printf("compress : failed with code %i\n", ret.error); - return ret.error; - } - printf("%s: %zu → %zu bytes, %.1f%%\n", - inpFilename, - (size_t)ret.size_in, (size_t)ret.size_out, /* might overflow is size_t is 32 bits and size_{in,out} > 4 GB */ - (double)ret.size_out / ret.size_in * 100); - printf("compress : done\n"); - } - - /* decompress */ - { FILE* const inpFp = fopen(lz4Filename, "rb"); - FILE* const outFp = fopen(decFilename, "wb"); - - printf("decompress : %s -> %s\n", lz4Filename, decFilename); - int const ret = decompress_file(inpFp, outFp); - - fclose(outFp); - fclose(inpFp); - - if (ret) { - printf("decompress : failed with code %i\n", ret); - return ret; - } - printf("decompress : done\n"); - } - - /* verify */ - { FILE* const inpFp = fopen(inpFilename, "rb"); - FILE* const decFp = fopen(decFilename, "rb"); - FILE* const uncFp = fopen(uncFilename, "rb"); - - printf("verify : %s <-> %s\n", inpFilename, decFilename); - int const cmp = compareFiles(inpFp, decFp, - uncFp, uncOffset); - - 
fclose(decFp); - fclose(inpFp); - if (uncFp) - fclose(uncFp); - - if (cmp) { - printf("corruption detected : decompressed file differs from original\n"); - return cmp; - } - printf("verify : OK\n"); - } - - return 0; -} diff --git a/librocksdb-sys/lz4/examples/printVersion.c b/librocksdb-sys/lz4/examples/printVersion.c deleted file mode 100644 index 7af318a..0000000 --- a/librocksdb-sys/lz4/examples/printVersion.c +++ /dev/null @@ -1,13 +0,0 @@ -// LZ4 trivial example : print Library version number -// by Takayuki Matsuoka - - -#include -#include "lz4.h" - -int main(int argc, char** argv) -{ - (void)argc; (void)argv; - printf("Hello World ! LZ4 Library version = %d\n", LZ4_versionNumber()); - return 0; -} diff --git a/librocksdb-sys/lz4/examples/simple_buffer.c b/librocksdb-sys/lz4/examples/simple_buffer.c deleted file mode 100644 index f5c6eb2..0000000 --- a/librocksdb-sys/lz4/examples/simple_buffer.c +++ /dev/null @@ -1,99 +0,0 @@ -/* - * simple_buffer.c - * Copyright : Kyle Harper - * License : Follows same licensing as the lz4.c/lz4.h program at any given time. Currently, BSD 2. - * Description: Example program to demonstrate the basic usage of the compress/decompress functions within lz4.c/lz4.h. - * The functions you'll likely want are LZ4_compress_default and LZ4_decompress_safe. - * Both of these are documented in the lz4.h header file; I recommend reading them. - */ - -/* Dependencies */ -#include // For printf() -#include // For memcmp() -#include // For exit() -#include "lz4.h" // This is all that is required to expose the prototypes for basic compression and decompression. - -/* - * Simple show-error-and-bail function. - */ -void run_screaming(const char* message, const int code) { - printf("%s \n", message); - exit(code); -} - - -/* - * main - */ -int main(void) { - /* Introduction */ - // Below we will have a Compression and Decompression section to demonstrate. - // There are a few important notes before we start: - // 1) The return codes of LZ4_ functions are important. - // Read lz4.h if you're unsure what a given code means. - // 2) LZ4 uses char* pointers in all LZ4_ functions. - // This is baked into the API and not going to change, for consistency. - // If your program uses different pointer types, - // you may need to do some casting or set the right -Wno compiler flags to ignore those warnings (e.g.: -Wno-pointer-sign). - - /* Compression */ - // We'll store some text into a variable pointed to by *src to be compressed later. - const char* const src = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Lorem ipsum dolor site amat."; - // The compression function needs to know how many bytes exist. Since we're using a string, we can use strlen() + 1 (for \0). - const int src_size = (int)(strlen(src) + 1); - // LZ4 provides a function that will tell you the maximum size of compressed output based on input data via LZ4_compressBound(). - const int max_dst_size = LZ4_compressBound(src_size); - // We will use that size for our destination boundary when allocating space. - char* compressed_data = (char*)malloc((size_t)max_dst_size); - if (compressed_data == NULL) - run_screaming("Failed to allocate memory for *compressed_data.", 1); - // That's all the information and preparation LZ4 needs to compress *src into* compressed_data. - // Invoke LZ4_compress_default now with our size values and pointers to our memory locations. - // Save the return value for error checking. 
- const int compressed_data_size = LZ4_compress_default(src, compressed_data, src_size, max_dst_size); - // Check return_value to determine what happened. - if (compressed_data_size <= 0) - run_screaming("A 0 or negative result from LZ4_compress_default() indicates a failure trying to compress the data. ", 1); - if (compressed_data_size > 0) - printf("We successfully compressed some data! Ratio: %.2f\n", - (float) compressed_data_size/src_size); - // Not only does a positive return_value mean success, the value returned == the number of bytes required. - // You can use this to realloc() *compress_data to free up memory, if desired. We'll do so just to demonstrate the concept. - compressed_data = (char *)realloc(compressed_data, (size_t)compressed_data_size); - if (compressed_data == NULL) - run_screaming("Failed to re-alloc memory for compressed_data. Sad :(", 1); - - - /* Decompression */ - // Now that we've successfully compressed the information from *src to *compressed_data, let's do the opposite! - // The decompression will need to know the compressed size, and an upper bound of the decompressed size. - // In this example, we just re-use this information from previous section, - // but in a real-world scenario, metadata must be transmitted to the decompression side. - // Each implementation is in charge of this part. Oftentimes, it adds some header of its own. - // Sometimes, the metadata can be extracted from the local context. - - // First, let's create a *new_src location of size src_size since we know that value. - char* const regen_buffer = (char*)malloc(src_size); - if (regen_buffer == NULL) - run_screaming("Failed to allocate memory for *regen_buffer.", 1); - // The LZ4_decompress_safe function needs to know where the compressed data is, how many bytes long it is, - // where the regen_buffer memory location is, and how large regen_buffer (uncompressed) output will be. - // Again, save the return_value. - const int decompressed_size = LZ4_decompress_safe(compressed_data, regen_buffer, compressed_data_size, src_size); - free(compressed_data); /* no longer useful */ - if (decompressed_size < 0) - run_screaming("A negative result from LZ4_decompress_safe indicates a failure trying to decompress the data. See exit code (echo $?) for value returned.", decompressed_size); - if (decompressed_size >= 0) - printf("We successfully decompressed some data!\n"); - // Not only does a positive return value mean success, - // value returned == number of bytes regenerated from compressed_data stream. - if (decompressed_size != src_size) - run_screaming("Decompressed data is different from original! \n", 1); - - /* Validation */ - // We should be able to compare our original *src with our *new_src and be byte-for-byte identical. - if (memcmp(src, regen_buffer, src_size) != 0) - run_screaming("Validation failed. *src and *new_src are not identical.", 1); - printf("Validation done. The string we ended up with is:\n%s\n", regen_buffer); - return 0; -} diff --git a/librocksdb-sys/lz4/examples/streaming_api_basics.md b/librocksdb-sys/lz4/examples/streaming_api_basics.md deleted file mode 100644 index 6f5ae41..0000000 --- a/librocksdb-sys/lz4/examples/streaming_api_basics.md +++ /dev/null @@ -1,87 +0,0 @@ -# LZ4 Streaming API Basics -by *Takayuki Matsuoka* -## LZ4 API sets - -LZ4 has the following API sets : - - - "Auto Framing" API (lz4frame.h) : - This is most recommended API for usual application. 
- It guarantees interoperability with other LZ4 framing format compliant tools/libraries - such as LZ4 command line utility, node-lz4, etc. - - "Block" API : This is recommended for simple purpose. - It compresses single raw memory block to LZ4 memory block and vice versa. - - "Streaming" API : This is designed for complex things. - For example, compress huge stream data in restricted memory environment. - -Basically, you should use "Auto Framing" API. -But if you want to write advanced application, it's time to use Block or Streaming APIs. - - -## What is difference between Block and Streaming API ? - -Block API (de)compresses a single contiguous memory block. -In other words, LZ4 library finds redundancy from a single contiguous memory block. -Streaming API does same thing but (de)compresses multiple adjacent contiguous memory blocks. -So Streaming API could find more redundancy than Block API. - -The following figure shows difference between API and block sizes. -In these figures, the original data is split into 4KiBytes contiguous chunks. - -``` -Original Data - +---------------+---------------+----+----+----+ - | 4KiB Chunk A | 4KiB Chunk B | C | D |... | - +---------------+---------------+----+----+----+ - -Example (1) : Block API, 4KiB Block - +---------------+---------------+----+----+----+ - | 4KiB Chunk A | 4KiB Chunk B | C | D |... | - +---------------+---------------+----+----+----+ - | Block #1 | Block #2 | #3 | #4 |... | - +---------------+---------------+----+----+----+ - - (No Dependency) - - -Example (2) : Block API, 8KiB Block - +---------------+---------------+----+----+----+ - | 4KiB Chunk A | 4KiB Chunk B | C | D |... | - +---------------+---------------+----+----+----+ - | Block #1 |Block #2 |... | - +--------------------+----------+-------+-+----+ - ^ | ^ | - | | | | - +--------------+ +----+ - Internal Dependency Internal Dependency - - -Example (3) : Streaming API, 4KiB Block - +---------------+---------------+-----+----+----+ - | 4KiB Chunk A | 4KiB Chunk B | C | D |... | - +---------------+---------------+-----+----+----+ - | Block #1 | Block #2 | #3 | #4 |... | - +---------------+----+----------+-+---+-+--+----+ - ^ | ^ | ^ | - | | | | | | - +--------------+ +--------+ +---+ - Dependency Dependency Dependency -``` - - - In example (1), there is no dependency. - All blocks are compressed independently. - - In example (2), naturally 8KiBytes block has internal dependency. - But still block #1 and #2 are compressed independently. - - In example (3), block #2 has dependency to #1, - also #3 has dependency to #2 and #1, #4 has #3, #2 and #1, and so on. - -Here, we can observe difference between example (2) and (3). -In (2), there's no dependency between chunk B and C, but (3) has dependency between B and C. -This dependency improves compression ratio. - - -## Restriction of Streaming API - -For efficiency, Streaming API doesn't keep a mirror copy of dependent (de)compressed memory. -This means users should keep these dependent (de)compressed memory explicitly. -Usually, "Dependent memory" is previous adjacent contiguous memory up to 64KiBytes. -LZ4 will not access further memories. 
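To make the cross-block dependency described above concrete, here is a minimal sketch (an illustrative addition, not part of the deleted files, assuming only `lz4.h`): two adjacent 4 KiB chunks are compressed with the Streaming API, so block #2 may reference data still resident from block #1, as in example (3). The source chunks use static storage because the stream may read back up to 64 KiB of previously supplied input.

```
/* Illustrative sketch: Streaming API with two adjacent chunks.
 * Chunk buffers are kept resident (static storage) because the stream
 * may reference up to 64 KiB of previously compressed input. */
#include <assert.h>
#include <string.h>
#include "lz4.h"

int main(void)
{
    static char chunk[2][4096];
    memset(chunk[0], 'A', sizeof chunk[0]);
    memset(chunk[1], 'A', sizeof chunk[1]);   /* same content: block #2 can match into block #1 */

    char out[2][LZ4_COMPRESSBOUND(4096)];
    LZ4_stream_t* const stream = LZ4_createStream();
    assert(stream != NULL);

    /* Block #1 : no prior data, compressed independently */
    int const c1 = LZ4_compress_fast_continue(stream, chunk[0], out[0],
                                              (int)sizeof chunk[0], (int)sizeof out[0], 1);
    /* Block #2 : may reuse matches from block #1 (the dependency shown in example (3)) */
    int const c2 = LZ4_compress_fast_continue(stream, chunk[1], out[1],
                                              (int)sizeof chunk[1], (int)sizeof out[1], 1);
    assert(c1 > 0 && c2 > 0);

    LZ4_freeStream(stream);
    return 0;
}
```

On the decoding side, the matching calls would be `LZ4_createStreamDecode()` and `LZ4_decompress_safe_continue()`, with the previously decoded 64 KiB kept reachable, mirroring the restriction described above.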
diff --git a/librocksdb-sys/lz4/lib/.gitignore b/librocksdb-sys/lz4/lib/.gitignore deleted file mode 100644 index 5d6f134..0000000 --- a/librocksdb-sys/lz4/lib/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# make install artefact -liblz4.pc diff --git a/librocksdb-sys/lz4/lib/LICENSE b/librocksdb-sys/lz4/lib/LICENSE deleted file mode 100644 index 4884916..0000000 --- a/librocksdb-sys/lz4/lib/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -LZ4 Library -Copyright (c) 2011-2020, Yann Collet -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/librocksdb-sys/lz4/lib/Makefile b/librocksdb-sys/lz4/lib/Makefile deleted file mode 100644 index 06503cb..0000000 --- a/librocksdb-sys/lz4/lib/Makefile +++ /dev/null @@ -1,225 +0,0 @@ -# ################################################################ -# LZ4 library - Makefile -# Copyright (C) Yann Collet 2011-2020 -# All rights reserved. -# -# This Makefile is validated for Linux, macOS, *BSD, Hurd, Solaris, MSYS2 targets -# -# BSD license -# Redistribution and use in source and binary forms, with or without modification, -# are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, this -# list of conditions and the following disclaimer in the documentation and/or -# other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-# -# You can contact the author at : -# - LZ4 source repository : https://github.com/lz4/lz4 -# - LZ4 forum froup : https://groups.google.com/forum/#!forum/lz4c -# ################################################################ -SED = sed - -# Version numbers -LIBVER_MAJOR_SCRIPT:=`$(SED) -n '/define LZ4_VERSION_MAJOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < ./lz4.h` -LIBVER_MINOR_SCRIPT:=`$(SED) -n '/define LZ4_VERSION_MINOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < ./lz4.h` -LIBVER_PATCH_SCRIPT:=`$(SED) -n '/define LZ4_VERSION_RELEASE/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < ./lz4.h` -LIBVER_SCRIPT:= $(LIBVER_MAJOR_SCRIPT).$(LIBVER_MINOR_SCRIPT).$(LIBVER_PATCH_SCRIPT) -LIBVER_MAJOR := $(shell echo $(LIBVER_MAJOR_SCRIPT)) -LIBVER_MINOR := $(shell echo $(LIBVER_MINOR_SCRIPT)) -LIBVER_PATCH := $(shell echo $(LIBVER_PATCH_SCRIPT)) -LIBVER := $(shell echo $(LIBVER_SCRIPT)) - -BUILD_SHARED:=yes -BUILD_STATIC:=yes - -CPPFLAGS+= -DXXH_NAMESPACE=LZ4_ -CPPFLAGS+= $(MOREFLAGS) -CFLAGS ?= -O3 -DEBUGFLAGS:= -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \ - -Wswitch-enum -Wdeclaration-after-statement -Wstrict-prototypes \ - -Wundef -Wpointer-arith -Wstrict-aliasing=1 -CFLAGS += $(DEBUGFLAGS) -FLAGS = $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) - -SRCFILES := $(sort $(wildcard *.c)) - -include ../Makefile.inc - -# OS X linker doesn't support -soname, and use different extension -# see : https://developer.apple.com/library/mac/documentation/DeveloperTools/Conceptual/DynamicLibraries/100-Articles/DynamicLibraryDesignGuidelines.html -ifeq ($(TARGET_OS), Darwin) - SHARED_EXT = dylib - SHARED_EXT_MAJOR = $(LIBVER_MAJOR).$(SHARED_EXT) - SHARED_EXT_VER = $(LIBVER).$(SHARED_EXT) - SONAME_FLAGS = -install_name $(libdir)/liblz4.$(SHARED_EXT_MAJOR) -compatibility_version $(LIBVER_MAJOR) -current_version $(LIBVER) -else - SONAME_FLAGS = -Wl,-soname=liblz4.$(SHARED_EXT).$(LIBVER_MAJOR) - SHARED_EXT = so - SHARED_EXT_MAJOR = $(SHARED_EXT).$(LIBVER_MAJOR) - SHARED_EXT_VER = $(SHARED_EXT).$(LIBVER) -endif - -.PHONY: default -default: lib-release - -# silent mode by default; verbose can be triggered by V=1 or VERBOSE=1 -$(V)$(VERBOSE).SILENT: - -lib-release: DEBUGFLAGS := -lib-release: lib - -.PHONY: lib -lib: liblz4.a liblz4 - -.PHONY: all -all: lib - -.PHONY: all32 -all32: CFLAGS+=-m32 -all32: all - -liblz4.a: $(SRCFILES) -ifeq ($(BUILD_STATIC),yes) # can be disabled on command line - @echo compiling static library - $(COMPILE.c) $^ - $(AR) rcs $@ *.o -endif - -ifeq ($(WINBASED),yes) -liblz4-dll.rc: liblz4-dll.rc.in - @echo creating library resource - $(SED) -e 's|@LIBLZ4@|$(LIBLZ4)|' \ - -e 's|@LIBVER_MAJOR@|$(LIBVER_MAJOR)|g' \ - -e 's|@LIBVER_MINOR@|$(LIBVER_MINOR)|g' \ - -e 's|@LIBVER_PATCH@|$(LIBVER_PATCH)|g' \ - $< >$@ - -liblz4-dll.o: liblz4-dll.rc - $(WINDRES) -i liblz4-dll.rc -o liblz4-dll.o - -$(LIBLZ4): $(SRCFILES) liblz4-dll.o - @echo compiling dynamic library $(LIBVER) - $(CC) $(FLAGS) -DLZ4_DLL_EXPORT=1 -shared $^ -o dll/$@.dll -Wl,--out-implib,dll/$(LIBLZ4_EXP) - -else # not windows - -$(LIBLZ4): $(SRCFILES) - @echo compiling dynamic library $(LIBVER) - $(CC) $(FLAGS) -shared $^ -fPIC -fvisibility=hidden $(SONAME_FLAGS) -o $@ - @echo creating versioned links - $(LN_SF) $@ liblz4.$(SHARED_EXT_MAJOR) - $(LN_SF) $@ liblz4.$(SHARED_EXT) - -endif - -.PHONY: liblz4 -liblz4: $(LIBLZ4) - -.PHONY: clean -clean: -ifeq ($(WINBASED),yes) - $(RM) *.rc -endif - $(RM) core *.o liblz4.pc dll/$(LIBLZ4).dll dll/$(LIBLZ4_EXP) - $(RM) *.a *.$(SHARED_EXT) *.$(SHARED_EXT_MAJOR) *.$(SHARED_EXT_VER) - @echo Cleaning library completed 
- -#----------------------------------------------------------------------------- -# make install is validated only for Linux, OSX, BSD, Hurd and Solaris targets -#----------------------------------------------------------------------------- -ifeq ($(POSIX_ENV),Yes) - -.PHONY: listL120 -listL120: # extract lines >= 120 characters in *.{c,h}, by Takayuki Matsuoka (note : $$, for Makefile compatibility) - find . -type f -name '*.c' -o -name '*.h' | while read -r filename; do awk 'length > 120 {print FILENAME "(" FNR "): " $$0}' $$filename; done - -DESTDIR ?= -# directory variables : GNU conventions prefer lowercase -# see https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html -# support both lower and uppercase (BSD), use lower in script -PREFIX ?= /usr/local -prefix ?= $(PREFIX) -EXEC_PREFIX ?= $(prefix) -exec_prefix ?= $(EXEC_PREFIX) -BINDIR ?= $(exec_prefix)/bin -bindir ?= $(BINDIR) -LIBDIR ?= $(exec_prefix)/lib -libdir ?= $(LIBDIR) -INCLUDEDIR ?= $(prefix)/include -includedir ?= $(INCLUDEDIR) - - ifneq (,$(filter $(TARGET_OS),OpenBSD FreeBSD NetBSD DragonFly MidnightBSD)) -PKGCONFIGDIR ?= $(prefix)/libdata/pkgconfig - else -PKGCONFIGDIR ?= $(libdir)/pkgconfig - endif -pkgconfigdir ?= $(PKGCONFIGDIR) - -liblz4.pc: liblz4.pc.in Makefile - @echo creating pkgconfig - $(SED) -e 's|@PREFIX@|$(prefix)|' \ - -e 's|@LIBDIR@|$(libdir)|' \ - -e 's|@INCLUDEDIR@|$(includedir)|' \ - -e 's|@VERSION@|$(LIBVER)|' \ - -e 's|=${prefix}/|=$${prefix}/|' \ - $< >$@ - -install: lib liblz4.pc - $(INSTALL_DIR) $(DESTDIR)$(pkgconfigdir)/ $(DESTDIR)$(includedir)/ $(DESTDIR)$(libdir)/ $(DESTDIR)$(bindir)/ - $(INSTALL_DATA) liblz4.pc $(DESTDIR)$(pkgconfigdir)/ - @echo Installing libraries in $(DESTDIR)$(libdir) - ifeq ($(BUILD_STATIC),yes) - $(INSTALL_DATA) liblz4.a $(DESTDIR)$(libdir)/liblz4.a - $(INSTALL_DATA) lz4frame_static.h $(DESTDIR)$(includedir)/lz4frame_static.h - endif - ifeq ($(BUILD_SHARED),yes) -# Traditionally, one installs the DLLs in the bin directory as programs -# search them first in their directory. This allows to not pollute system -# directories (like c:/windows/system32), nor modify the PATH variable. 
- ifeq ($(WINBASED),yes) - $(INSTALL_PROGRAM) dll/$(LIBLZ4).dll $(DESTDIR)$(bindir) - $(INSTALL_PROGRAM) dll/$(LIBLZ4_EXP) $(DESTDIR)$(libdir) - else - $(INSTALL_PROGRAM) liblz4.$(SHARED_EXT_VER) $(DESTDIR)$(libdir) - $(LN_SF) liblz4.$(SHARED_EXT_VER) $(DESTDIR)$(libdir)/liblz4.$(SHARED_EXT_MAJOR) - $(LN_SF) liblz4.$(SHARED_EXT_VER) $(DESTDIR)$(libdir)/liblz4.$(SHARED_EXT) - endif - endif - @echo Installing headers in $(DESTDIR)$(includedir) - $(INSTALL_DATA) lz4.h $(DESTDIR)$(includedir)/lz4.h - $(INSTALL_DATA) lz4hc.h $(DESTDIR)$(includedir)/lz4hc.h - $(INSTALL_DATA) lz4frame.h $(DESTDIR)$(includedir)/lz4frame.h - @echo lz4 libraries installed - -uninstall: - $(RM) $(DESTDIR)$(pkgconfigdir)/liblz4.pc - ifeq (WINBASED,1) - $(RM) $(DESTDIR)$(bindir)/$(LIBLZ4).dll - $(RM) $(DESTDIR)$(libdir)/$(LIBLZ4_EXP) - else - $(RM) $(DESTDIR)$(libdir)/liblz4.$(SHARED_EXT) - $(RM) $(DESTDIR)$(libdir)/liblz4.$(SHARED_EXT_MAJOR) - $(RM) $(DESTDIR)$(libdir)/liblz4.$(SHARED_EXT_VER) - endif - $(RM) $(DESTDIR)$(libdir)/liblz4.a - $(RM) $(DESTDIR)$(includedir)/lz4.h - $(RM) $(DESTDIR)$(includedir)/lz4hc.h - $(RM) $(DESTDIR)$(includedir)/lz4frame.h - $(RM) $(DESTDIR)$(includedir)/lz4frame_static.h - @echo lz4 libraries successfully uninstalled - -endif diff --git a/librocksdb-sys/lz4/lib/README.md b/librocksdb-sys/lz4/lib/README.md deleted file mode 100644 index 08d1cef..0000000 --- a/librocksdb-sys/lz4/lib/README.md +++ /dev/null @@ -1,169 +0,0 @@ -LZ4 - Library Files -================================ - -The `/lib` directory contains many files, but depending on project's objectives, -not all of them are required. -Limited systems may want to reduce the nb of source files to include -as a way to reduce binary size and dependencies. - -Capabilities are added at the "level" granularity, detailed below. - -#### Level 1 : Minimal LZ4 build - -The minimum required is **`lz4.c`** and **`lz4.h`**, -which provides the fast compression and decompression algorithms. -They generate and decode data using the [LZ4 block format]. - - -#### Level 2 : High Compression variant - -For more compression ratio at the cost of compression speed, -the High Compression variant called **lz4hc** is available. -Add files **`lz4hc.c`** and **`lz4hc.h`**. -This variant also compresses data using the [LZ4 block format], -and depends on regular `lib/lz4.*` source files. - - -#### Level 3 : Frame support, for interoperability - -In order to produce compressed data compatible with `lz4` command line utility, -it's necessary to use the [official interoperable frame format]. -This format is generated and decoded automatically by the **lz4frame** library. -Its public API is described in `lib/lz4frame.h`. -In order to work properly, lz4frame needs all other modules present in `/lib`, -including, lz4 and lz4hc, and also **xxhash**. -So it's necessary to also include `xxhash.c` and `xxhash.h`. - - -#### Level 4 : File compression operations - -As a helper around file operations, -the library has been recently extended with `lz4file.c` and `lz4file.h` -(still considered experimental at the time of this writing). -These helpers allow opening, reading, writing, and closing files -using transparent LZ4 compression / decompression. -As a consequence, using `lz4file` adds a dependency on ``. - -`lz4file` relies on `lz4frame` in order to produce compressed data -conformant to the [LZ4 Frame format] specification. -Consequently, to enable this capability, -it's necessary to include all `*.c` and `*.h` files from `lib/` directory. 
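As an illustration of the "Level 3" capability described above (an added sketch, not part of the deleted files), a single interoperable frame can be produced in one shot with `LZ4F_compressFrame()`. The sketch assumes `lz4frame.h` and its dependencies (lz4, lz4hc, xxhash) are compiled in, and keeps error handling minimal.

```
/* Minimal one-shot use of the frame API (lz4frame.h). */
#include <stdio.h>
#include <stdlib.h>
#include "lz4frame.h"

int main(void)
{
    const char src[] = "some data to store in an interoperable LZ4 frame";
    size_t const srcSize = sizeof src;

    /* NULL preferences => library defaults */
    size_t const dstCapacity = LZ4F_compressFrameBound(srcSize, NULL);
    void* const dst = malloc(dstCapacity);
    if (dst == NULL) return 1;

    /* Produces a complete frame, decodable by the lz4 CLI and other frame-aware tools */
    size_t const cSize = LZ4F_compressFrame(dst, dstCapacity, src, srcSize, NULL);
    if (LZ4F_isError(cSize)) {
        printf("frame compression failed: %s\n", LZ4F_getErrorName(cSize));
        free(dst);
        return 1;
    }
    printf("compressed %zu bytes into a %zu-byte frame\n", srcSize, cSize);
    free(dst);
    return 0;
}
```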
- - -#### Advanced / Experimental API - -Definitions which are not guaranteed to remain stable in future versions, -are protected behind macros, such as `LZ4_STATIC_LINKING_ONLY`. -As the name suggests, these definitions should only be invoked -in the context of static linking ***only***. -Otherwise, dependent application may fail on API or ABI break in the future. -The associated symbols are also not exposed by the dynamic library by default. -Should they be nonetheless needed, it's possible to force their publication -by using build macros `LZ4_PUBLISH_STATIC_FUNCTIONS` -and `LZ4F_PUBLISH_STATIC_FUNCTIONS`. - - -#### Build macros - -The following build macro can be selected to adjust source code behavior at compilation time : - -- `LZ4_FAST_DEC_LOOP` : this triggers a speed optimized decompression loop, more powerful on modern cpus. - This loop works great on `x86`, `x64` and `aarch64` cpus, and is automatically enabled for them. - It's also possible to enable or disable it manually, by passing `LZ4_FAST_DEC_LOOP=1` or `0` to the preprocessor. - For example, with `gcc` : `-DLZ4_FAST_DEC_LOOP=1`, - and with `make` : `CPPFLAGS+=-DLZ4_FAST_DEC_LOOP=1 make lz4`. - -- `LZ4_DISTANCE_MAX` : control the maximum offset that the compressor will allow. - Set to 65535 by default, which is the maximum value supported by lz4 format. - Reducing maximum distance will reduce opportunities for LZ4 to find matches, - hence will produce a worse compression ratio. - Setting a smaller max distance could allow compatibility with specific decoders with limited memory budget. - This build macro only influences the compressed output of the compressor. - -- `LZ4_DISABLE_DEPRECATE_WARNINGS` : invoking a deprecated function will make the compiler generate a warning. - This is meant to invite users to update their source code. - Should this be a problem, it's generally possible to make the compiler ignore these warnings, - for example with `-Wno-deprecated-declarations` on `gcc`, - or `_CRT_SECURE_NO_WARNINGS` for Visual Studio. - This build macro offers another project-specific method - by defining `LZ4_DISABLE_DEPRECATE_WARNINGS` before including the LZ4 header files. - -- `LZ4_FORCE_SW_BITCOUNT` : by default, the compression algorithm tries to determine lengths - by using bitcount instructions, generally implemented as fast single instructions in many cpus. - In case the target cpus doesn't support it, or compiler intrinsic doesn't work, or feature bad performance, - it's possible to use an optimized software path instead. - This is achieved by setting this build macros. - In most cases, it's not expected to be necessary, - but it can be legitimately considered for less common platforms. - -- `LZ4_ALIGN_TEST` : alignment test ensures that the memory area - passed as argument to become a compression state is suitably aligned. - This test can be disabled if it proves flaky, by setting this value to 0. - -- `LZ4_USER_MEMORY_FUNCTIONS` : replace calls to ``'s `malloc()`, `calloc()` and `free()` - by user-defined functions, which must be named `LZ4_malloc()`, `LZ4_calloc()` and `LZ4_free()`. - User functions must be available at link time. - -- `LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION` : - Remove support of dynamic memory allocation. - For more details, see description of this macro in `lib/lz4.c`. - -- `LZ4_FREESTANDING` : by setting this build macro to 1, - LZ4/HC removes dependencies on the C standard library, - including allocation functions and `memmove()`, `memcpy()`, and `memset()`. 
- This build macro is designed to help use LZ4/HC in restricted environments - (embedded, bootloader, etc). - For more details, see description of this macro in `lib/lz4.h`. - - - -#### Amalgamation - -lz4 source code can be amalgamated into a single file. -One can combine all source code into `lz4_all.c` by using following command: -``` -cat lz4.c lz4hc.c lz4frame.c > lz4_all.c -``` -(`cat` file order is important) then compile `lz4_all.c`. -All `*.h` files present in `/lib` remain necessary to compile `lz4_all.c`. - - -#### Windows : using MinGW+MSYS to create DLL - -DLL can be created using MinGW+MSYS with the `make liblz4` command. -This command creates `dll\liblz4.dll` and the import library `dll\liblz4.lib`. -To override the `dlltool` command when cross-compiling on Linux, just set the `DLLTOOL` variable. Example of cross compilation on Linux with mingw-w64 64 bits: -``` -make BUILD_STATIC=no CC=x86_64-w64-mingw32-gcc DLLTOOL=x86_64-w64-mingw32-dlltool OS=Windows_NT -``` -The import library is only required with Visual C++. -The header files `lz4.h`, `lz4hc.h`, `lz4frame.h` and the dynamic library -`dll\liblz4.dll` are required to compile a project using gcc/MinGW. -The dynamic library has to be added to linking options. -It means that if a project that uses LZ4 consists of a single `test-dll.c` -file it should be linked with `dll\liblz4.dll`. For example: -``` - $(CC) $(CFLAGS) -Iinclude/ test-dll.c -o test-dll dll\liblz4.dll -``` -The compiled executable will require LZ4 DLL which is available at `dll\liblz4.dll`. - - -#### Miscellaneous - -Other files present in the directory are not source code. They are : - - - `LICENSE` : contains the BSD license text - - `Makefile` : `make` script to compile and install lz4 library (static and dynamic) - - `liblz4.pc.in` : for `pkg-config` (used in `make install`) - - `README.md` : this file - -[official interoperable frame format]: ../doc/lz4_Frame_format.md -[LZ4 Frame format]: ../doc/lz4_Frame_format.md -[LZ4 block format]: ../doc/lz4_Block_format.md - - -#### License - -All source material within __lib__ directory are BSD 2-Clause licensed. -See [LICENSE](LICENSE) for details. -The license is also reminded at the top of each source file. diff --git a/librocksdb-sys/lz4/lib/dll/example/Makefile b/librocksdb-sys/lz4/lib/dll/example/Makefile deleted file mode 100644 index eb8cc1e..0000000 --- a/librocksdb-sys/lz4/lib/dll/example/Makefile +++ /dev/null @@ -1,63 +0,0 @@ -# ########################################################################## -# LZ4 programs - Makefile -# Copyright (C) Yann Collet 2016-2020 -# -# GPL v2 License -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-# -# You can contact the author at : -# - LZ4 homepage : http://www.lz4.org -# - LZ4 source repository : https://github.com/lz4/lz4 -# ########################################################################## - -VOID := /dev/null -LZ4DIR := ../include -LIBDIR := ../static -DLLDIR := ../dll - -CFLAGS ?= -O3 # can select custom flags. For example : CFLAGS="-O2 -g" make -CFLAGS += -Wall -Wextra -Wundef -Wcast-qual -Wcast-align -Wshadow -Wswitch-enum \ - -Wdeclaration-after-statement -Wstrict-prototypes \ - -Wpointer-arith -Wstrict-aliasing=1 -CFLAGS += $(MOREFLAGS) -CPPFLAGS:= -I$(LZ4DIR) -DXXH_NAMESPACE=LZ4_ -FLAGS := $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) - - -# Define *.exe as extension for Windows systems -ifneq (,$(filter Windows%,$(OS))) -EXT =.exe -else -EXT = -endif - -.PHONY: default fullbench-dll fullbench-lib - - -default: all - -all: fullbench-dll fullbench-lib - - -fullbench-lib: fullbench.c xxhash.c - $(CC) $(FLAGS) $^ -o $@$(EXT) $(LIBDIR)/liblz4_static.lib - -fullbench-dll: fullbench.c xxhash.c - $(CC) $(FLAGS) $^ -o $@$(EXT) -DLZ4_DLL_IMPORT=1 $(DLLDIR)/liblz4.dll - -clean: - @$(RM) fullbench-dll$(EXT) fullbench-lib$(EXT) \ - @echo Cleaning completed diff --git a/librocksdb-sys/lz4/lib/dll/example/README.md b/librocksdb-sys/lz4/lib/dll/example/README.md deleted file mode 100644 index b93914b..0000000 --- a/librocksdb-sys/lz4/lib/dll/example/README.md +++ /dev/null @@ -1,69 +0,0 @@ -LZ4 Windows binary package -==================================== - -#### The package contents - -- `lz4.exe` : Command Line Utility, supporting gzip-like arguments -- `dll\msys-lz4-1.dll` : The DLL of LZ4 library, compiled by msys -- `dll\liblz4.dll.a` : The import library of LZ4 library for Visual C++ -- `example\` : The example of usage of LZ4 library -- `include\` : Header files required with LZ4 library -- `static\liblz4_static.lib` : The static LZ4 library - - -#### Usage of Command Line Interface - -Command Line Interface (CLI) supports gzip-like arguments. -By default CLI takes an input file and compresses it to an output file: -``` - Usage: lz4 [arg] [input] [output] -``` -The full list of commands for CLI can be obtained with `-h` or `-H`. The ratio can -be improved with commands from `-3` to `-16` but higher levels also have slower -compression. CLI includes in-memory compression benchmark module with compression -levels starting from `-b` and ending with `-e` with iteration time of `-i` seconds. -CLI supports aggregation of parameters i.e. `-b1`, `-e18`, and `-i1` can be joined -into `-b1e18i1`. - - -#### The example of usage of static and dynamic LZ4 libraries with gcc/MinGW - -Use `cd example` and `make` to build `fullbench-dll` and `fullbench-lib`. -`fullbench-dll` uses a dynamic LZ4 library from the `dll` directory. -`fullbench-lib` uses a static LZ4 library from the `lib` directory. - - -#### Using LZ4 DLL with gcc/MinGW - -The header files from `include\` and the dynamic library `dll\msys-lz4-1.dll` -are required to compile a project using gcc/MinGW. -The dynamic library has to be added to linking options. -It means that if a project that uses LZ4 consists of a single `test-dll.c` -file it should be linked with `dll\msys-lz4-1.dll`. For example: -``` - gcc $(CFLAGS) -Iinclude\ test-dll.c -o test-dll dll\msys-lz4-1.dll -``` -The compiled executable will require LZ4 DLL which is available at `dll\msys-lz4-1.dll`. 
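The `test-dll.c` referenced above is only hypothetical; a minimal program consistent with that link command might look like the following sketch (an added illustration assuming nothing beyond `lz4.h` from `include\`, with its symbols resolved from the LZ4 DLL at run time).

```
/* Hypothetical test-dll.c, matching the gcc/MinGW link command quoted above. */
#include <stdio.h>
#include "lz4.h"

int main(void)
{
    printf("linked against LZ4 %s\n", LZ4_versionString());
    return 0;
}
```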
- - -#### The example of usage of static and dynamic LZ4 libraries with Visual C++ - -Open `example\fullbench-dll.sln` to compile `fullbench-dll` that uses a -dynamic LZ4 library from the `dll` directory. The solution works with Visual C++ -2010 or newer. When one will open the solution with Visual C++ newer than 2010 -then the solution will be upgraded to the current version. - - -#### Using LZ4 DLL with Visual C++ - -The header files from `include\` and the import library `dll\liblz4.dll.a` -are required to compile a project using Visual C++. - -1. The header files should be added to `Additional Include Directories` that can - be found in project properties `C/C++` then `General`. -2. The import library has to be added to `Additional Dependencies` that can - be found in project properties `Linker` then `Input`. - If one will provide only the name `liblz4.dll.a` without a full path to the library - the directory has to be added to `Linker\General\Additional Library Directories`. - -The compiled executable will require LZ4 DLL which is available at `dll\msys-lz4-1.dll`. diff --git a/librocksdb-sys/lz4/lib/dll/example/fullbench-dll.sln b/librocksdb-sys/lz4/lib/dll/example/fullbench-dll.sln deleted file mode 100644 index 72e302e..0000000 --- a/librocksdb-sys/lz4/lib/dll/example/fullbench-dll.sln +++ /dev/null @@ -1,25 +0,0 @@ -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio Express 2012 for Windows Desktop -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fullbench-dll", "fullbench-dll.vcxproj", "{13992FD2-077E-4954-B065-A428198201A9}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Win32 = Debug|Win32 - Debug|x64 = Debug|x64 - Release|Win32 = Release|Win32 - Release|x64 = Release|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {13992FD2-077E-4954-B065-A428198201A9}.Debug|Win32.ActiveCfg = Debug|Win32 - {13992FD2-077E-4954-B065-A428198201A9}.Debug|Win32.Build.0 = Debug|Win32 - {13992FD2-077E-4954-B065-A428198201A9}.Debug|x64.ActiveCfg = Debug|x64 - {13992FD2-077E-4954-B065-A428198201A9}.Debug|x64.Build.0 = Debug|x64 - {13992FD2-077E-4954-B065-A428198201A9}.Release|Win32.ActiveCfg = Release|Win32 - {13992FD2-077E-4954-B065-A428198201A9}.Release|Win32.Build.0 = Release|Win32 - {13992FD2-077E-4954-B065-A428198201A9}.Release|x64.ActiveCfg = Release|x64 - {13992FD2-077E-4954-B065-A428198201A9}.Release|x64.Build.0 = Release|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal diff --git a/librocksdb-sys/lz4/lib/dll/example/fullbench-dll.vcxproj b/librocksdb-sys/lz4/lib/dll/example/fullbench-dll.vcxproj deleted file mode 100644 index cdb5534..0000000 --- a/librocksdb-sys/lz4/lib/dll/example/fullbench-dll.vcxproj +++ /dev/null @@ -1,182 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {13992FD2-077E-4954-B065-A428198201A9} - Win32Proj - fullbench-dll - $(SolutionDir)bin\$(Platform)_$(Configuration)\ - $(SolutionDir)bin\obj\$(RootNamespace)_$(Platform)_$(Configuration)\ - - - - Application - true - Unicode - - - Application - true - Unicode - - - Application - false - true - Unicode - - - Application - false - true - Unicode - - - - - - - - - - - - - - - - - - - true - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - true - 
$(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - - - false - $(IncludePath);$(UniversalCRT_IncludePath);$(SolutionDir)..\..\lib;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSDK_IncludePath); - true - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) - true - false - ..\include - - - Console - true - $(SolutionDir)..\dll;%(AdditionalLibraryDirectories) - liblz4.lib;%(AdditionalDependencies) - false - - - - - - - Level4 - Disabled - WIN32;_DEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) - true - true - /analyze:stacksize295252 %(AdditionalOptions) - ..\include - - - Console - true - $(SolutionDir)..\dll;%(AdditionalLibraryDirectories) - liblz4.lib;%(AdditionalDependencies) - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) - false - false - ..\include - - - Console - true - true - true - $(SolutionDir)..\dll;%(AdditionalLibraryDirectories) - liblz4.lib;%(AdditionalDependencies) - false - - - - - Level4 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;LZ4_DLL_IMPORT=1;%(PreprocessorDefinitions) - false - true - /analyze:stacksize295252 %(AdditionalOptions) - ..\include - - - Console - true - true - true - $(SolutionDir)..\dll;%(AdditionalLibraryDirectories) - liblz4.lib;%(AdditionalDependencies) - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/librocksdb-sys/lz4/lib/liblz4-dll.rc.in b/librocksdb-sys/lz4/lib/liblz4-dll.rc.in deleted file mode 100644 index e2d84b6..0000000 --- a/librocksdb-sys/lz4/lib/liblz4-dll.rc.in +++ /dev/null @@ -1,35 +0,0 @@ -#include - -// DLL version information. -1 VERSIONINFO -FILEVERSION @LIBVER_MAJOR@,@LIBVER_MINOR@,@LIBVER_PATCH@,0 -PRODUCTVERSION @LIBVER_MAJOR@,@LIBVER_MINOR@,@LIBVER_PATCH@,0 -FILEFLAGSMASK VS_FFI_FILEFLAGSMASK -#ifdef _DEBUG - FILEFLAGS VS_FF_DEBUG | VS_FF_PRERELEASE -#else - FILEFLAGS 0 -#endif -FILEOS VOS_NT_WINDOWS32 -FILETYPE VFT_DLL -FILESUBTYPE VFT2_UNKNOWN -BEGIN - BLOCK "StringFileInfo" - BEGIN - BLOCK "040904B0" - BEGIN - VALUE "CompanyName", "Yann Collet" - VALUE "FileDescription", "Extremely fast compression" - VALUE "FileVersion", "@LIBVER_MAJOR@.@LIBVER_MINOR@.@LIBVER_PATCH@.0" - VALUE "InternalName", "@LIBLZ4@" - VALUE "LegalCopyright", "Copyright (C) 2013-2020, Yann Collet" - VALUE "OriginalFilename", "@LIBLZ4@.dll" - VALUE "ProductName", "LZ4" - VALUE "ProductVersion", "@LIBVER_MAJOR@.@LIBVER_MINOR@.@LIBVER_PATCH@.0" - END - END - BLOCK "VarFileInfo" - BEGIN - VALUE "Translation", 0x0409, 1200 - END -END diff --git a/librocksdb-sys/lz4/lib/liblz4.pc.in b/librocksdb-sys/lz4/lib/liblz4.pc.in deleted file mode 100644 index ed52214..0000000 --- a/librocksdb-sys/lz4/lib/liblz4.pc.in +++ /dev/null @@ -1,14 +0,0 @@ -# LZ4 - Fast LZ compression algorithm -# Copyright (C) 2011-2020, Yann Collet. 
-# BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - -prefix=@PREFIX@ -libdir=@LIBDIR@ -includedir=@INCLUDEDIR@ - -Name: lz4 -Description: extremely fast lossless compression algorithm library -URL: http://www.lz4.org/ -Version: @VERSION@ -Libs: -L${libdir} -llz4 -Cflags: -I${includedir} diff --git a/librocksdb-sys/lz4/lib/lz4.c b/librocksdb-sys/lz4/lib/lz4.c deleted file mode 100644 index 654bfdf..0000000 --- a/librocksdb-sys/lz4/lib/lz4.c +++ /dev/null @@ -1,2722 +0,0 @@ -/* - LZ4 - Fast LZ compression algorithm - Copyright (C) 2011-2020, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 homepage : http://www.lz4.org - - LZ4 source repository : https://github.com/lz4/lz4 -*/ - -/*-************************************ -* Tuning parameters -**************************************/ -/* - * LZ4_HEAPMODE : - * Select how default compression functions will allocate memory for their hash table, - * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()). - */ -#ifndef LZ4_HEAPMODE -# define LZ4_HEAPMODE 0 -#endif - -/* - * LZ4_ACCELERATION_DEFAULT : - * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0 - */ -#define LZ4_ACCELERATION_DEFAULT 1 -/* - * LZ4_ACCELERATION_MAX : - * Any "acceleration" value higher than this threshold - * get treated as LZ4_ACCELERATION_MAX instead (fix #876) - */ -#define LZ4_ACCELERATION_MAX 65537 - - -/*-************************************ -* CPU Feature Detection -**************************************/ -/* LZ4_FORCE_MEMORY_ACCESS - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. 
- * It can generate buggy code on targets which assembly generation depends on alignment. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */ -# if defined(__GNUC__) && \ - ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \ - || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) -# define LZ4_FORCE_MEMORY_ACCESS 2 -# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) -# define LZ4_FORCE_MEMORY_ACCESS 1 -# endif -#endif - -/* - * LZ4_FORCE_SW_BITCOUNT - * Define this parameter if your target system or compiler does not support hardware bit count - */ -#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */ -# undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */ -# define LZ4_FORCE_SW_BITCOUNT -#endif - - - -/*-************************************ -* Dependency -**************************************/ -/* - * LZ4_SRC_INCLUDED: - * Amalgamation flag, whether lz4.c is included - */ -#ifndef LZ4_SRC_INCLUDED -# define LZ4_SRC_INCLUDED 1 -#endif - -#ifndef LZ4_STATIC_LINKING_ONLY -#define LZ4_STATIC_LINKING_ONLY -#endif - -#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS -#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */ -#endif - -#define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */ -#include "lz4.h" -/* see also "memory routines" below */ - - -/*-************************************ -* Compiler Options -**************************************/ -#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */ -# include /* only present in VS2005+ */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -# pragma warning(disable : 6237) /* disable: C6237: conditional expression is always 0 */ -#endif /* _MSC_VER */ - -#ifndef LZ4_FORCE_INLINE -# ifdef _MSC_VER /* Visual Studio */ -# define LZ4_FORCE_INLINE static __forceinline -# else -# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ -# ifdef __GNUC__ -# define LZ4_FORCE_INLINE static inline __attribute__((always_inline)) -# else -# define LZ4_FORCE_INLINE static inline -# endif -# else -# define LZ4_FORCE_INLINE static -# endif /* __STDC_VERSION__ */ -# endif /* _MSC_VER */ -#endif /* LZ4_FORCE_INLINE */ - -/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE - * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8, - * together with a simple 8-byte copy loop as a fall-back path. - * However, this optimization hurts the decompression speed by >30%, - * because the execution does not go to the optimized loop - * for typical compressible data, and all of the preamble checks - * before going to the fall-back path become useless overhead. - * This optimization happens only with the -O3 flag, and -O2 generates - * a simple 8-byte copy loop. - * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8 - * functions are annotated with __attribute__((optimize("O2"))), - * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute - * of LZ4_wildCopy8 does not affect the compression speed. 
- */ -#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__) -# define LZ4_FORCE_O2 __attribute__((optimize("O2"))) -# undef LZ4_FORCE_INLINE -# define LZ4_FORCE_INLINE static __inline __attribute__((optimize("O2"),always_inline)) -#else -# define LZ4_FORCE_O2 -#endif - -#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__) -# define expect(expr,value) (__builtin_expect ((expr),(value)) ) -#else -# define expect(expr,value) (expr) -#endif - -#ifndef likely -#define likely(expr) expect((expr) != 0, 1) -#endif -#ifndef unlikely -#define unlikely(expr) expect((expr) != 0, 0) -#endif - -/* Should the alignment test prove unreliable, for some reason, - * it can be disabled by setting LZ4_ALIGN_TEST to 0 */ -#ifndef LZ4_ALIGN_TEST /* can be externally provided */ -# define LZ4_ALIGN_TEST 1 -#endif - - -/*-************************************ -* Memory routines -**************************************/ - -/*! LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION : - * Disable relatively high-level LZ4/HC functions that use dynamic memory - * allocation functions (malloc(), calloc(), free()). - * - * Note that this is a compile-time switch. And since it disables - * public/stable LZ4 v1 API functions, we don't recommend using this - * symbol to generate a library for distribution. - * - * The following public functions are removed when this symbol is defined. - * - lz4 : LZ4_createStream, LZ4_freeStream, - * LZ4_createStreamDecode, LZ4_freeStreamDecode, LZ4_create (deprecated) - * - lz4hc : LZ4_createStreamHC, LZ4_freeStreamHC, - * LZ4_createHC (deprecated), LZ4_freeHC (deprecated) - * - lz4frame, lz4file : All LZ4F_* functions - */ -#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) -# define ALLOC(s) lz4_error_memory_allocation_is_disabled -# define ALLOC_AND_ZERO(s) lz4_error_memory_allocation_is_disabled -# define FREEMEM(p) lz4_error_memory_allocation_is_disabled -#elif defined(LZ4_USER_MEMORY_FUNCTIONS) -/* memory management functions can be customized by user project. - * Below functions must exist somewhere in the Project - * and be available at link time */ -void* LZ4_malloc(size_t s); -void* LZ4_calloc(size_t n, size_t s); -void LZ4_free(void* p); -# define ALLOC(s) LZ4_malloc(s) -# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s) -# define FREEMEM(p) LZ4_free(p) -#else -# include /* malloc, calloc, free */ -# define ALLOC(s) malloc(s) -# define ALLOC_AND_ZERO(s) calloc(1,s) -# define FREEMEM(p) free(p) -#endif - -#if ! 
LZ4_FREESTANDING -# include /* memset, memcpy */ -#endif -#if !defined(LZ4_memset) -# define LZ4_memset(p,v,s) memset((p),(v),(s)) -#endif -#define MEM_INIT(p,v,s) LZ4_memset((p),(v),(s)) - - -/*-************************************ -* Common Constants -**************************************/ -#define MINMATCH 4 - -#define WILDCOPYLENGTH 8 -#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */ -#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */ -#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */ -#define FASTLOOP_SAFE_DISTANCE 64 -static const int LZ4_minLength = (MFLIMIT+1); - -#define KB *(1 <<10) -#define MB *(1 <<20) -#define GB *(1U<<30) - -#define LZ4_DISTANCE_ABSOLUTE_MAX 65535 -#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */ -# error "LZ4_DISTANCE_MAX is too big : must be <= 65535" -#endif - -#define ML_BITS 4 -#define ML_MASK ((1U<=1) -# include -#else -# ifndef assert -# define assert(condition) ((void)0) -# endif -#endif - -#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */ - -#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) -# include - static int g_debuglog_enable = 1; -# define DEBUGLOG(l, ...) { \ - if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \ - fprintf(stderr, __FILE__ ": "); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, " \n"); \ - } } -#else -# define DEBUGLOG(l, ...) {} /* disabled */ -#endif - -static int LZ4_isAligned(const void* ptr, size_t alignment) -{ - return ((size_t)ptr & (alignment -1)) == 0; -} - - -/*-************************************ -* Types -**************************************/ -#include -#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# include - typedef uint8_t BYTE; - typedef uint16_t U16; - typedef uint32_t U32; - typedef int32_t S32; - typedef uint64_t U64; - typedef uintptr_t uptrval; -#else -# if UINT_MAX != 4294967295UL -# error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4" -# endif - typedef unsigned char BYTE; - typedef unsigned short U16; - typedef unsigned int U32; - typedef signed int S32; - typedef unsigned long long U64; - typedef size_t uptrval; /* generally true, except OpenVMS-64 */ -#endif - -#if defined(__x86_64__) - typedef U64 reg_t; /* 64-bits in x32 mode */ -#else - typedef size_t reg_t; /* 32-bits in x32 mode */ -#endif - -typedef enum { - notLimited = 0, - limitedOutput = 1, - fillOutput = 2 -} limitedOutput_directive; - - -/*-************************************ -* Reading and writing into memory -**************************************/ - -/** - * LZ4 relies on memcpy with a constant size being inlined. In freestanding - * environments, the compiler can't assume the implementation of memcpy() is - * standard compliant, so it can't apply its specialized memcpy() inlining - * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze - * memcpy() as if it were standard compliant, so it can inline it in freestanding - * environments. This is needed when decompressing the Linux Kernel, for example. 
- */ -#if !defined(LZ4_memcpy) -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size) -# else -# define LZ4_memcpy(dst, src, size) memcpy(dst, src, size) -# endif -#endif - -#if !defined(LZ4_memmove) -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define LZ4_memmove __builtin_memmove -# else -# define LZ4_memmove memmove -# endif -#endif - -static unsigned LZ4_isLittleEndian(void) -{ - const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ - return one.c[0]; -} - - -#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2) -/* lie to the compiler about data alignment; use with caution */ - -static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; } -static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; } -static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; } - -static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } -static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } - -#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) LZ4_unalign; - -static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign*)ptr)->u16; } -static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign*)ptr)->u32; } -static reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalign*)ptr)->uArch; } - -static void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign*)memPtr)->u16 = value; } -static void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign*)memPtr)->u32 = value; } - -#else /* safe and portable access using memcpy() */ - -static U16 LZ4_read16(const void* memPtr) -{ - U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val; -} - -static U32 LZ4_read32(const void* memPtr) -{ - U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val; -} - -static reg_t LZ4_read_ARCH(const void* memPtr) -{ - reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val; -} - -static void LZ4_write16(void* memPtr, U16 value) -{ - LZ4_memcpy(memPtr, &value, sizeof(value)); -} - -static void LZ4_write32(void* memPtr, U32 value) -{ - LZ4_memcpy(memPtr, &value, sizeof(value)); -} - -#endif /* LZ4_FORCE_MEMORY_ACCESS */ - - -static U16 LZ4_readLE16(const void* memPtr) -{ - if (LZ4_isLittleEndian()) { - return LZ4_read16(memPtr); - } else { - const BYTE* p = (const BYTE*)memPtr; - return (U16)((U16)p[0] + (p[1]<<8)); - } -} - -static void LZ4_writeLE16(void* memPtr, U16 value) -{ - if (LZ4_isLittleEndian()) { - LZ4_write16(memPtr, value); - } else { - BYTE* p = (BYTE*)memPtr; - p[0] = (BYTE) value; - p[1] = (BYTE)(value>>8); - } -} - -/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */ -LZ4_FORCE_INLINE -void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd) -{ - BYTE* d = (BYTE*)dstPtr; - const BYTE* s = (const BYTE*)srcPtr; - BYTE* const e = (BYTE*)dstEnd; - - do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d= 16. 
*/ -LZ4_FORCE_INLINE void -LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd) -{ - BYTE* d = (BYTE*)dstPtr; - const BYTE* s = (const BYTE*)srcPtr; - BYTE* const e = (BYTE*)dstEnd; - - do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d= dstPtr + MINMATCH - * - there is at least 8 bytes available to write after dstEnd */ -LZ4_FORCE_INLINE void -LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset) -{ - BYTE v[8]; - - assert(dstEnd >= dstPtr + MINMATCH); - - switch(offset) { - case 1: - MEM_INIT(v, *srcPtr, 8); - break; - case 2: - LZ4_memcpy(v, srcPtr, 2); - LZ4_memcpy(&v[2], srcPtr, 2); -#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */ -# pragma warning(push) -# pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */ -#endif - LZ4_memcpy(&v[4], v, 4); -#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */ -# pragma warning(pop) -#endif - break; - case 4: - LZ4_memcpy(v, srcPtr, 4); - LZ4_memcpy(&v[4], srcPtr, 4); - break; - default: - LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset); - return; - } - - LZ4_memcpy(dstPtr, v, 8); - dstPtr += 8; - while (dstPtr < dstEnd) { - LZ4_memcpy(dstPtr, v, 8); - dstPtr += 8; - } -} -#endif - - -/*-************************************ -* Common functions -**************************************/ -static unsigned LZ4_NbCommonBytes (reg_t val) -{ - assert(val != 0); - if (LZ4_isLittleEndian()) { - if (sizeof(val) == 8) { -# if defined(_MSC_VER) && (_MSC_VER >= 1800) && (defined(_M_AMD64) && !defined(_M_ARM64EC)) && !defined(LZ4_FORCE_SW_BITCOUNT) -/*-************************************************************************************************* -* ARM64EC is a Microsoft-designed ARM64 ABI compatible with AMD64 applications on ARM64 Windows 11. -* The ARM64EC ABI does not support AVX/AVX2/AVX512 instructions, nor their relevant intrinsics -* including _tzcnt_u64. Therefore, we need to neuter the _tzcnt_u64 code path for ARM64EC. -****************************************************************************************************/ -# if defined(__clang__) && (__clang_major__ < 10) - /* Avoid undefined clang-cl intrinsics issue. - * See https://github.com/lz4/lz4/pull/1017 for details. 
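LZ4_memcpy_using_offset() above expands matches whose offset is 1, 2 or 4 by first materialising one 8-byte repetition of the pattern and then stamping that unit forward. A rough standalone sketch of that idea follows; the helper name and the demo buffer are illustrative, and the generic-offset path of the real function is elided:

#include <stdio.h>
#include <string.h>

/* Illustrative only: expand a match whose source overlaps the destination
 * by first building one 8-byte repetition of the pattern (offset 1, 2 or 4),
 * then stamping that unit until dstEnd is reached.  The real decoder may
 * overwrite a few bytes past dstEnd, so callers keep an output margin. */
static void copy_small_offset(unsigned char* dst, const unsigned char* src,
                              unsigned char* dstEnd, size_t offset)
{
    unsigned char v[8];
    switch (offset) {
    case 1: memset(v, src[0], 8); break;
    case 2: memcpy(v, src, 2); memcpy(v+2, src, 2); memcpy(v+4, v, 4); break;
    case 4: memcpy(v, src, 4); memcpy(v+4, src, 4); break;
    default: /* other offsets take a generic path in the real code */
        return;
    }
    while (dst < dstEnd) { memcpy(dst, v, 8); dst += 8; }
}

int main(void)
{
    unsigned char out[32] = "ab";          /* already-decoded prefix */
    /* replicate the 2-byte pattern "ab" for 16 more bytes (offset == 2) */
    copy_small_offset(out + 2, out, out + 2 + 16, 2);
    out[18] = '\0';
    printf("%s\n", out);                   /* ababababababababab */
    return 0;
}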
*/ - return (unsigned)__builtin_ia32_tzcnt_u64(val) >> 3; -# else - /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */ - return (unsigned)_tzcnt_u64(val) >> 3; -# endif -# elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r = 0; - _BitScanForward64(&r, (U64)val); - return (unsigned)r >> 3; -# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ - ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ - !defined(LZ4_FORCE_SW_BITCOUNT) - return (unsigned)__builtin_ctzll((U64)val) >> 3; -# else - const U64 m = 0x0101010101010101ULL; - val ^= val - 1; - return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56); -# endif - } else /* 32 bits */ { -# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT) - unsigned long r; - _BitScanForward(&r, (U32)val); - return (unsigned)r >> 3; -# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ - ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ - !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (unsigned)__builtin_ctz((U32)val) >> 3; -# else - const U32 m = 0x01010101; - return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24; -# endif - } - } else /* Big Endian CPU */ { - if (sizeof(val)==8) { -# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ - ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ - !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT) - return (unsigned)__builtin_clzll((U64)val) >> 3; -# else -#if 1 - /* this method is probably faster, - * but adds a 128 bytes lookup table */ - static const unsigned char ctz7_tab[128] = { - 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, - 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, - 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, - 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, - 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, - 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, - 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, - 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, - }; - U64 const mask = 0x0101010101010101ULL; - U64 const t = (((val >> 8) - mask) | val) & mask; - return ctz7_tab[(t * 0x0080402010080402ULL) >> 57]; -#else - /* this method doesn't consume memory space like the previous one, - * but it contains several branches, - * that may end up slowing execution */ - static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits. - Just to avoid some static analyzer complaining about shift by 32 on 32-bits target. - Note that this code path is never triggered in 32-bits mode. 
*/ - unsigned r; - if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; } - if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } - r += (!val); - return r; -#endif -# endif - } else /* 32 bits */ { -# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \ - ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \ - !defined(LZ4_FORCE_SW_BITCOUNT) - return (unsigned)__builtin_clz((U32)val) >> 3; -# else - val >>= 8; - val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) | - (val + 0x00FF0000)) >> 24; - return (unsigned)val ^ 3; -# endif - } - } -} - - -#define STEPSIZE sizeof(reg_t) -LZ4_FORCE_INLINE -unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit) -{ - const BYTE* const pStart = pIn; - - if (likely(pIn < pInLimit-(STEPSIZE-1))) { - reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn); - if (!diff) { - pIn+=STEPSIZE; pMatch+=STEPSIZE; - } else { - return LZ4_NbCommonBytes(diff); - } } - - while (likely(pIn < pInLimit-(STEPSIZE-1))) { - reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn); - if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; } - pIn += LZ4_NbCommonBytes(diff); - return (unsigned)(pIn - pStart); - } - - if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; } - if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; } - if ((pIn compression run slower on incompressible data */ - - -/*-************************************ -* Local Structures and types -**************************************/ -typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t; - -/** - * This enum distinguishes several different modes of accessing previous - * content in the stream. - * - * - noDict : There is no preceding content. - * - withPrefix64k : Table entries up to ctx->dictSize before the current blob - * blob being compressed are valid and refer to the preceding - * content (of length ctx->dictSize), which is available - * contiguously preceding in memory the content currently - * being compressed. - * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere - * else in memory, starting at ctx->dictionary with length - * ctx->dictSize. - * - usingDictCtx : Everything concerning the preceding content is - * in a separate context, pointed to by ctx->dictCtx. - * ctx->dictionary, ctx->dictSize, and table entries - * in the current context that refer to positions - * preceding the beginning of the current compression are - * ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx - * ->dictSize describe the location and size of the preceding - * content, and matches are found by looking in the ctx - * ->dictCtx->hashTable. 
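LZ4_NbCommonBytes() and LZ4_count() above measure a match by XOR-ing word-sized loads and converting the first non-zero difference into a byte count with a trailing/leading-zero instruction. A simplified standalone sketch, assuming a little-endian target and the GCC/Clang __builtin_ctzll intrinsic; the real code selects among several intrinsics and table fallbacks as shown above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Number of identical leading bytes between two buffers, word-at-a-time.
 * The XOR of two 64-bit loads is zero while the words match; the first
 * differing byte is found from the trailing zero count (little-endian). */
static size_t common_bytes(const uint8_t* a, const uint8_t* b, size_t len)
{
    size_t n = 0;
    while (n + 8 <= len) {
        uint64_t wa, wb;
        memcpy(&wa, a + n, 8);
        memcpy(&wb, b + n, 8);
        if (wa != wb)
            return n + ((size_t)__builtin_ctzll(wa ^ wb) >> 3);
        n += 8;
    }
    while (n < len && a[n] == b[n]) n++;   /* tail, byte by byte */
    return n;
}

int main(void)
{
    const uint8_t x[] = "abcdefghijKLM";
    const uint8_t y[] = "abcdefghijKLx";
    printf("%zu common bytes\n", common_bytes(x, y, 13));  /* prints 12 */
    return 0;
}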
- */ -typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive; -typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive; - - -/*-************************************ -* Local Utils -**************************************/ -int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; } -const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; } -int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); } -int LZ4_sizeofState(void) { return sizeof(LZ4_stream_t); } - - -/*-**************************************** -* Internal Definitions, used only in Tests -*******************************************/ -#if defined (__cplusplus) -extern "C" { -#endif - -int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize); - -int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, - int compressedSize, int maxOutputSize, - const void* dictStart, size_t dictSize); -int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest, - int compressedSize, int targetOutputSize, int dstCapacity, - const void* dictStart, size_t dictSize); -#if defined (__cplusplus) -} -#endif - -/*-****************************** -* Compression functions -********************************/ -LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType) -{ - if (tableType == byU16) - return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1))); - else - return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG)); -} - -LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType) -{ - const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG; - if (LZ4_isLittleEndian()) { - const U64 prime5bytes = 889523592379ULL; - return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog)); - } else { - const U64 prime8bytes = 11400714785074694791ULL; - return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog)); - } -} - -LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType) -{ - if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType); - return LZ4_hash4(LZ4_read32(p), tableType); -} - -LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType) -{ - switch (tableType) - { - default: /* fallthrough */ - case clearedTable: { /* illegal! */ assert(0); return; } - case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; } - case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; } - case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; } - } -} - -LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType) -{ - switch (tableType) - { - default: /* fallthrough */ - case clearedTable: /* fallthrough */ - case byPtr: { /* illegal! */ assert(0); return; } - case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; } - case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; } - } -} - -LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h, - void* tableBase, tableType_t const tableType, - const BYTE* srcBase) -{ - switch (tableType) - { - case clearedTable: { /* illegal! 
*/ assert(0); return; } - case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; } - case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; } - case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; } - } -} - -LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase) -{ - U32 const h = LZ4_hashPosition(p, tableType); - LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase); -} - -/* LZ4_getIndexOnHash() : - * Index of match position registered in hash table. - * hash position must be calculated by using base+index, or dictBase+index. - * Assumption 1 : only valid if tableType == byU32 or byU16. - * Assumption 2 : h is presumed valid (within limits of hash table) - */ -LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType) -{ - LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2); - if (tableType == byU32) { - const U32* const hashTable = (const U32*) tableBase; - assert(h < (1U << (LZ4_MEMORY_USAGE-2))); - return hashTable[h]; - } - if (tableType == byU16) { - const U16* const hashTable = (const U16*) tableBase; - assert(h < (1U << (LZ4_MEMORY_USAGE-1))); - return hashTable[h]; - } - assert(0); return 0; /* forbidden case */ -} - -static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase) -{ - if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; } - if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; } - { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */ -} - -LZ4_FORCE_INLINE const BYTE* -LZ4_getPosition(const BYTE* p, - const void* tableBase, tableType_t tableType, - const BYTE* srcBase) -{ - U32 const h = LZ4_hashPosition(p, tableType); - return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase); -} - -LZ4_FORCE_INLINE void -LZ4_prepareTable(LZ4_stream_t_internal* const cctx, - const int inputSize, - const tableType_t tableType) { - /* If the table hasn't been used, it's guaranteed to be zeroed out, and is - * therefore safe to use no matter what mode we're in. Otherwise, we figure - * out if it's safe to leave as is or whether it needs to be reset. - */ - if ((tableType_t)cctx->tableType != clearedTable) { - assert(inputSize >= 0); - if ((tableType_t)cctx->tableType != tableType - || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU) - || ((tableType == byU32) && cctx->currentOffset > 1 GB) - || tableType == byPtr - || inputSize >= 4 KB) - { - DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx); - MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE); - cctx->currentOffset = 0; - cctx->tableType = (U32)clearedTable; - } else { - DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)"); - } - } - - /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back, - * is faster than compressing without a gap. - * However, compressing with currentOffset == 0 is faster still, - * so we preserve that case. 
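The hash helpers above (LZ4_hash4() and the put/get-position functions) index a small table with a multiplicative hash of the next 4 input bytes, so a previous occurrence of the same 4-byte sequence can be found in constant time. A toy standalone sketch of that lookup; HASHLOG and the zero-means-empty convention here are illustrative simplifications, not the file's actual table sizing or offset handling:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HASHLOG 12   /* 2^12-entry table, illustrative size */

/* Multiplicative hash of the 4 bytes at p, as in LZ4_hash4():
 * multiply by a large odd constant, keep the top HASHLOG bits. */
static uint32_t hash4(const void* p)
{
    uint32_t seq;
    memcpy(&seq, p, 4);
    return (seq * 2654435761U) >> (32 - HASHLOG);
}

int main(void)
{
    static uint32_t table[1u << HASHLOG];   /* last position seen, 0 = empty */
    const char* src = "abcdabcdabcd";
    uint32_t pos;

    for (pos = 0; pos + 4 <= 12; pos++) {
        uint32_t h = hash4(src + pos);
        uint32_t candidate = table[h];
        if (candidate != 0 && memcmp(src + candidate, src + pos, 4) == 0)
            printf("pos %u matches earlier pos %u\n", pos, candidate);
        table[h] = pos;                     /* remember most recent position */
    }
    return 0;
}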
- */ - if (cctx->currentOffset != 0 && tableType == byU32) { - DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset"); - cctx->currentOffset += 64 KB; - } - - /* Finally, clear history */ - cctx->dictCtx = NULL; - cctx->dictionary = NULL; - cctx->dictSize = 0; -} - -/** LZ4_compress_generic() : - * inlined, to ensure branches are decided at compilation time. - * Presumed already validated at this stage: - * - source != NULL - * - inputSize > 0 - */ -LZ4_FORCE_INLINE int LZ4_compress_generic_validated( - LZ4_stream_t_internal* const cctx, - const char* const source, - char* const dest, - const int inputSize, - int* inputConsumed, /* only written when outputDirective == fillOutput */ - const int maxOutputSize, - const limitedOutput_directive outputDirective, - const tableType_t tableType, - const dict_directive dictDirective, - const dictIssue_directive dictIssue, - const int acceleration) -{ - int result; - const BYTE* ip = (const BYTE*) source; - - U32 const startIndex = cctx->currentOffset; - const BYTE* base = (const BYTE*) source - startIndex; - const BYTE* lowLimit; - - const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx; - const BYTE* const dictionary = - dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary; - const U32 dictSize = - dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize; - const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with index in current context */ - - int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx); - U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */ - const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary; - const BYTE* anchor = (const BYTE*) source; - const BYTE* const iend = ip + inputSize; - const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1; - const BYTE* const matchlimit = iend - LASTLITERALS; - - /* the dictCtx currentOffset is indexed on the start of the dictionary, - * while a dictionary in the current context precedes the currentOffset */ - const BYTE* dictBase = (dictionary == NULL) ? NULL : - (dictDirective == usingDictCtx) ? - dictionary + dictSize - dictCtx->currentOffset : - dictionary + dictSize - startIndex; - - BYTE* op = (BYTE*) dest; - BYTE* const olimit = op + maxOutputSize; - - U32 offset = 0; - U32 forwardH; - - DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType); - assert(ip != NULL); - /* If init conditions are not met, we don't have to mark stream - * as having dirty context, since no action was taken yet */ - if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */ - if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; } /* Size too large (not within 64K limit) */ - if (tableType==byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */ - assert(acceleration >= 1); - - lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0); - - /* Update context state */ - if (dictDirective == usingDictCtx) { - /* Subsequent linked blocks can't use the dictionary. */ - /* Instead, they use the block we just compressed. 
*/ - cctx->dictCtx = NULL; - cctx->dictSize = (U32)inputSize; - } else { - cctx->dictSize += (U32)inputSize; - } - cctx->currentOffset += (U32)inputSize; - cctx->tableType = (U32)tableType; - - if (inputSizehashTable, tableType, base); - ip++; forwardH = LZ4_hashPosition(ip, tableType); - - /* Main Loop */ - for ( ; ; ) { - const BYTE* match; - BYTE* token; - const BYTE* filledIp; - - /* Find a match */ - if (tableType == byPtr) { - const BYTE* forwardIp = ip; - int step = 1; - int searchMatchNb = acceleration << LZ4_skipTrigger; - do { - U32 const h = forwardH; - ip = forwardIp; - forwardIp += step; - step = (searchMatchNb++ >> LZ4_skipTrigger); - - if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals; - assert(ip < mflimitPlusOne); - - match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base); - forwardH = LZ4_hashPosition(forwardIp, tableType); - LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base); - - } while ( (match+LZ4_DISTANCE_MAX < ip) - || (LZ4_read32(match) != LZ4_read32(ip)) ); - - } else { /* byU32, byU16 */ - - const BYTE* forwardIp = ip; - int step = 1; - int searchMatchNb = acceleration << LZ4_skipTrigger; - do { - U32 const h = forwardH; - U32 const current = (U32)(forwardIp - base); - U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType); - assert(matchIndex <= current); - assert(forwardIp - base < (ptrdiff_t)(2 GB - 1)); - ip = forwardIp; - forwardIp += step; - step = (searchMatchNb++ >> LZ4_skipTrigger); - - if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals; - assert(ip < mflimitPlusOne); - - if (dictDirective == usingDictCtx) { - if (matchIndex < startIndex) { - /* there was no match, try the dictionary */ - assert(tableType == byU32); - matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32); - match = dictBase + matchIndex; - matchIndex += dictDelta; /* make dictCtx index comparable with current context */ - lowLimit = dictionary; - } else { - match = base + matchIndex; - lowLimit = (const BYTE*)source; - } - } else if (dictDirective == usingExtDict) { - if (matchIndex < startIndex) { - DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex); - assert(startIndex - matchIndex >= MINMATCH); - assert(dictBase); - match = dictBase + matchIndex; - lowLimit = dictionary; - } else { - match = base + matchIndex; - lowLimit = (const BYTE*)source; - } - } else { /* single continuous memory segment */ - match = base + matchIndex; - } - forwardH = LZ4_hashPosition(forwardIp, tableType); - LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType); - - DEBUGLOG(7, "candidate at pos=%u (offset=%u \n", matchIndex, current - matchIndex); - if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */ - assert(matchIndex < current); - if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX)) - && (matchIndex+LZ4_DISTANCE_MAX < current)) { - continue; - } /* too far */ - assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */ - - if (LZ4_read32(match) == LZ4_read32(ip)) { - if (maybe_extMem) offset = current - matchIndex; - break; /* match found */ - } - - } while(1); - } - - /* Catch up */ - filledIp = ip; - while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; } - - /* Encode Literals */ - { unsigned const litLength = (unsigned)(ip - anchor); - token = op++; - if ((outputDirective == limitedOutput) && /* Check output buffer overflow */ - 
(unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) { - return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */ - } - if ((outputDirective == fillOutput) && - (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) { - op--; - goto _last_literals; - } - if (litLength >= RUN_MASK) { - int len = (int)(litLength - RUN_MASK); - *token = (RUN_MASK<= 255 ; len-=255) *op++ = 255; - *op++ = (BYTE)len; - } - else *token = (BYTE)(litLength< olimit)) { - /* the match was too close to the end, rewind and go to last literals */ - op = token; - goto _last_literals; - } - - /* Encode Offset */ - if (maybe_extMem) { /* static test */ - DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE*)source)); - assert(offset <= LZ4_DISTANCE_MAX && offset > 0); - LZ4_writeLE16(op, (U16)offset); op+=2; - } else { - DEBUGLOG(6, " with offset=%u (same segment)", (U32)(ip - match)); - assert(ip-match <= LZ4_DISTANCE_MAX); - LZ4_writeLE16(op, (U16)(ip - match)); op+=2; - } - - /* Encode MatchLength */ - { unsigned matchCode; - - if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx) - && (lowLimit==dictionary) /* match within extDict */ ) { - const BYTE* limit = ip + (dictEnd-match); - assert(dictEnd > match); - if (limit > matchlimit) limit = matchlimit; - matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit); - ip += (size_t)matchCode + MINMATCH; - if (ip==limit) { - unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit); - matchCode += more; - ip += more; - } - DEBUGLOG(6, " with matchLength=%u starting in extDict", matchCode+MINMATCH); - } else { - matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit); - ip += (size_t)matchCode + MINMATCH; - DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH); - } - - if ((outputDirective) && /* Check output buffer overflow */ - (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) { - if (outputDirective == fillOutput) { - /* Match description too long : reduce it */ - U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255; - ip -= matchCode - newMatchCode; - assert(newMatchCode < matchCode); - matchCode = newMatchCode; - if (unlikely(ip <= filledIp)) { - /* We have already filled up to filledIp so if ip ends up less than filledIp - * we have positions in the hash table beyond the current position. This is - * a problem if we reuse the hash table. So we have to remove these positions - * from the hash table. - */ - const BYTE* ptr; - DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip)); - for (ptr = ip; ptr <= filledIp; ++ptr) { - U32 const h = LZ4_hashPosition(ptr, tableType); - LZ4_clearHash(h, cctx->hashTable, tableType); - } - } - } else { - assert(outputDirective == limitedOutput); - return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */ - } - } - if (matchCode >= ML_MASK) { - *token += ML_MASK; - matchCode -= ML_MASK; - LZ4_write32(op, 0xFFFFFFFF); - while (matchCode >= 4*255) { - op+=4; - LZ4_write32(op, 0xFFFFFFFF); - matchCode -= 4*255; - } - op += matchCode / 255; - *op++ = (BYTE)(matchCode % 255); - } else - *token += (BYTE)(matchCode); - } - /* Ensure we have enough space for the last literals. 
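The encoding steps above produce, per sequence, a one-byte token holding a 4-bit literal length and a 4-bit match length, optional 255-valued extension bytes for either length, the literals themselves, and a 2-byte little-endian offset. A small standalone sketch of just the token plus the literal-length extension; the helper name is illustrative, and the literals, offset and match-length extension are elided:

#include <stdio.h>
#include <stdint.h>

/* Emit one LZ4 sequence header: the token and the extended literal length.
 * Each length nibble saturates at 15 and continues in 255-valued bytes,
 * matching the encoding performed by the code above. */
static size_t write_sequence_header(uint8_t* dst, size_t litLen, size_t matchLen)
{
    size_t n = 0;
    size_t ml = matchLen - 4;                /* MINMATCH is implicit */
    uint8_t token;

    token  = (uint8_t)((litLen >= 15 ? 15 : litLen) << 4);
    token |= (uint8_t)(ml     >= 15 ? 15 : ml);
    dst[n++] = token;

    if (litLen >= 15) {                      /* literal length extension */
        size_t rest = litLen - 15;
        while (rest >= 255) { dst[n++] = 255; rest -= 255; }
        dst[n++] = (uint8_t)rest;
    }
    /* ... literals, then the LE16 offset, then the same scheme for ml ... */
    return n;
}

int main(void)
{
    uint8_t buf[8];
    size_t n = write_sequence_header(buf, 300, 8);   /* 300 literals, match of 8 */
    size_t i;
    for (i = 0; i < n; i++) printf("%02x ", buf[i]);
    printf("\n");   /* f4 ff 1e : lit nibble 15, match nibble 4, 15+255+30 = 300 */
    return 0;
}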
*/ - assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit)); - - anchor = ip; - - /* Test end of chunk */ - if (ip >= mflimitPlusOne) break; - - /* Fill table */ - LZ4_putPosition(ip-2, cctx->hashTable, tableType, base); - - /* Test next position */ - if (tableType == byPtr) { - - match = LZ4_getPosition(ip, cctx->hashTable, tableType, base); - LZ4_putPosition(ip, cctx->hashTable, tableType, base); - if ( (match+LZ4_DISTANCE_MAX >= ip) - && (LZ4_read32(match) == LZ4_read32(ip)) ) - { token=op++; *token=0; goto _next_match; } - - } else { /* byU32, byU16 */ - - U32 const h = LZ4_hashPosition(ip, tableType); - U32 const current = (U32)(ip-base); - U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType); - assert(matchIndex < current); - if (dictDirective == usingDictCtx) { - if (matchIndex < startIndex) { - /* there was no match, try the dictionary */ - matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32); - match = dictBase + matchIndex; - lowLimit = dictionary; /* required for match length counter */ - matchIndex += dictDelta; - } else { - match = base + matchIndex; - lowLimit = (const BYTE*)source; /* required for match length counter */ - } - } else if (dictDirective==usingExtDict) { - if (matchIndex < startIndex) { - assert(dictBase); - match = dictBase + matchIndex; - lowLimit = dictionary; /* required for match length counter */ - } else { - match = base + matchIndex; - lowLimit = (const BYTE*)source; /* required for match length counter */ - } - } else { /* single memory segment */ - match = base + matchIndex; - } - LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType); - assert(matchIndex < current); - if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1) - && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current)) - && (LZ4_read32(match) == LZ4_read32(ip)) ) { - token=op++; - *token=0; - if (maybe_extMem) offset = current - matchIndex; - DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i", - (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source)); - goto _next_match; - } - } - - /* Prepare next loop */ - forwardH = LZ4_hashPosition(++ip, tableType); - - } - -_last_literals: - /* Encode Last Literals */ - { size_t lastRun = (size_t)(iend - anchor); - if ( (outputDirective) && /* Check output buffer overflow */ - (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) { - if (outputDirective == fillOutput) { - /* adapt lastRun to fill 'dst' */ - assert(olimit >= op); - lastRun = (size_t)(olimit-op) - 1/*token*/; - lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/ - } else { - assert(outputDirective == limitedOutput); - return 0; /* cannot compress within `dst` budget. 
Stored indexes in hash table are nonetheless fine */ - } - } - DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun); - if (lastRun >= RUN_MASK) { - size_t accumulator = lastRun - RUN_MASK; - *op++ = RUN_MASK << ML_BITS; - for(; accumulator >= 255 ; accumulator-=255) *op++ = 255; - *op++ = (BYTE) accumulator; - } else { - *op++ = (BYTE)(lastRun< 0); - DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result); - return result; -} - -/** LZ4_compress_generic() : - * inlined, to ensure branches are decided at compilation time; - * takes care of src == (NULL, 0) - * and forward the rest to LZ4_compress_generic_validated */ -LZ4_FORCE_INLINE int LZ4_compress_generic( - LZ4_stream_t_internal* const cctx, - const char* const src, - char* const dst, - const int srcSize, - int *inputConsumed, /* only written when outputDirective == fillOutput */ - const int dstCapacity, - const limitedOutput_directive outputDirective, - const tableType_t tableType, - const dict_directive dictDirective, - const dictIssue_directive dictIssue, - const int acceleration) -{ - DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i", - srcSize, dstCapacity); - - if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported srcSize, too large (or negative) */ - if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */ - if (outputDirective != notLimited && dstCapacity <= 0) return 0; /* no output, can't write anything */ - DEBUGLOG(5, "Generating an empty block"); - assert(outputDirective == notLimited || dstCapacity >= 1); - assert(dst != NULL); - dst[0] = 0; - if (outputDirective == fillOutput) { - assert (inputConsumed != NULL); - *inputConsumed = 0; - } - return 1; - } - assert(src != NULL); - - return LZ4_compress_generic_validated(cctx, src, dst, srcSize, - inputConsumed, /* only written into if outputDirective == fillOutput */ - dstCapacity, outputDirective, - tableType, dictDirective, dictIssue, acceleration); -} - - -int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) -{ - LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse; - assert(ctx != NULL); - if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT; - if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX; - if (maxOutputSize >= LZ4_compressBound(inputSize)) { - if (inputSize < LZ4_64Klimit) { - return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration); - } else { - const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32; - return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); - } - } else { - if (inputSize < LZ4_64Klimit) { - return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration); - } else { - const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32; - return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration); - } - } -} - -/** - * LZ4_compress_fast_extState_fastReset() : - * A variant of LZ4_compress_fast_extState(). - * - * Using this variant avoids an expensive initialization step. 
It is only safe - * to call if the state buffer is known to be correctly initialized already - * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of - * "correctly initialized"). - */ -int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration) -{ - LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse; - if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT; - if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX; - - if (dstCapacity >= LZ4_compressBound(srcSize)) { - if (srcSize < LZ4_64Klimit) { - const tableType_t tableType = byU16; - LZ4_prepareTable(ctx, srcSize, tableType); - if (ctx->currentOffset) { - return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration); - } else { - return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); - } - } else { - const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32; - LZ4_prepareTable(ctx, srcSize, tableType); - return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); - } - } else { - if (srcSize < LZ4_64Klimit) { - const tableType_t tableType = byU16; - LZ4_prepareTable(ctx, srcSize, tableType); - if (ctx->currentOffset) { - return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration); - } else { - return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration); - } - } else { - const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32; - LZ4_prepareTable(ctx, srcSize, tableType); - return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration); - } - } -} - - -int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) -{ - int result; -#if (LZ4_HEAPMODE) - LZ4_stream_t* ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ - if (ctxPtr == NULL) return 0; -#else - LZ4_stream_t ctx; - LZ4_stream_t* const ctxPtr = &ctx; -#endif - result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration); - -#if (LZ4_HEAPMODE) - FREEMEM(ctxPtr); -#endif - return result; -} - - -int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize) -{ - return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1); -} - - -/* Note!: This function leaves the stream in an unclean/broken state! - * It is not safe to subsequently use the same state with a _fastReset() or - * _continue() call without resetting it. 
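For callers of the one-shot entry points defined above (LZ4_compress_default() is LZ4_compress_fast() with acceleration 1), a minimal round-trip usage sketch, assuming only the public lz4.h declarations:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "lz4.h"

int main(void)
{
    const char src[] = "round and round and round it goes";
    const int  srcSize = (int)sizeof(src);

    int   bound = LZ4_compressBound(srcSize);    /* worst-case output size */
    char* comp  = (char*)malloc((size_t)bound);
    char* back  = (char*)malloc((size_t)srcSize);
    if (comp == NULL || back == NULL) return 1;

    int cSize = LZ4_compress_default(src, comp, srcSize, bound);
    if (cSize <= 0) return 1;                    /* 0 means compression failed */

    int dSize = LZ4_decompress_safe(comp, back, cSize, srcSize);
    if (dSize != srcSize || memcmp(src, back, (size_t)srcSize) != 0) return 1;

    printf("%d -> %d -> %d bytes\n", srcSize, cSize, dSize);
    free(comp); free(back);
    return 0;
}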
*/ -static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize) -{ - void* const s = LZ4_initStream(state, sizeof (*state)); - assert(s != NULL); (void)s; - - if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */ - return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1); - } else { - if (*srcSizePtr < LZ4_64Klimit) { - return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1); - } else { - tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32; - return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1); - } } -} - - -int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize) -{ -#if (LZ4_HEAPMODE) - LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ - if (ctx == NULL) return 0; -#else - LZ4_stream_t ctxBody; - LZ4_stream_t* ctx = &ctxBody; -#endif - - int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize); - -#if (LZ4_HEAPMODE) - FREEMEM(ctx); -#endif - return result; -} - - - -/*-****************************** -* Streaming functions -********************************/ - -#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) -LZ4_stream_t* LZ4_createStream(void) -{ - LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); - LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >= sizeof(LZ4_stream_t_internal)); - DEBUGLOG(4, "LZ4_createStream %p", lz4s); - if (lz4s == NULL) return NULL; - LZ4_initStream(lz4s, sizeof(*lz4s)); - return lz4s; -} -#endif - -static size_t LZ4_stream_t_alignment(void) -{ -#if LZ4_ALIGN_TEST - typedef struct { char c; LZ4_stream_t t; } t_a; - return sizeof(t_a) - sizeof(LZ4_stream_t); -#else - return 1; /* effectively disabled */ -#endif -} - -LZ4_stream_t* LZ4_initStream (void* buffer, size_t size) -{ - DEBUGLOG(5, "LZ4_initStream"); - if (buffer == NULL) { return NULL; } - if (size < sizeof(LZ4_stream_t)) { return NULL; } - if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL; - MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal)); - return (LZ4_stream_t*)buffer; -} - -/* resetStream is now deprecated, - * prefer initStream() which is more general */ -void LZ4_resetStream (LZ4_stream_t* LZ4_stream) -{ - DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream); - MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal)); -} - -void LZ4_resetStream_fast(LZ4_stream_t* ctx) { - LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32); -} - -#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) -int LZ4_freeStream (LZ4_stream_t* LZ4_stream) -{ - if (!LZ4_stream) return 0; /* support free on NULL */ - DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream); - FREEMEM(LZ4_stream); - return (0); -} -#endif - - -#define HASH_UNIT sizeof(reg_t) -int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize) -{ - LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse; - const tableType_t tableType = byU32; - const BYTE* p = (const BYTE*)dictionary; - const BYTE* const dictEnd = p + dictSize; - const BYTE* base; - - DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict); - - /* It's necessary to reset the context, - * and not just 
continue it with prepareTable() - * to avoid any risk of generating overflowing matchIndex - * when compressing using this dictionary */ - LZ4_resetStream(LZ4_dict); - - /* We always increment the offset by 64 KB, since, if the dict is longer, - * we truncate it to the last 64k, and if it's shorter, we still want to - * advance by a whole window length so we can provide the guarantee that - * there are only valid offsets in the window, which allows an optimization - * in LZ4_compress_fast_continue() where it uses noDictIssue even when the - * dictionary isn't a full 64k. */ - dict->currentOffset += 64 KB; - - if (dictSize < (int)HASH_UNIT) { - return 0; - } - - if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB; - base = dictEnd - dict->currentOffset; - dict->dictionary = p; - dict->dictSize = (U32)(dictEnd - p); - dict->tableType = (U32)tableType; - - while (p <= dictEnd-HASH_UNIT) { - LZ4_putPosition(p, dict->hashTable, tableType, base); - p+=3; - } - - return (int)dict->dictSize; -} - -void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream) -{ - const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL : - &(dictionaryStream->internal_donotuse); - - DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)", - workingStream, dictionaryStream, - dictCtx != NULL ? dictCtx->dictSize : 0); - - if (dictCtx != NULL) { - /* If the current offset is zero, we will never look in the - * external dictionary context, since there is no value a table - * entry can take that indicate a miss. In that case, we need - * to bump the offset to something non-zero. - */ - if (workingStream->internal_donotuse.currentOffset == 0) { - workingStream->internal_donotuse.currentOffset = 64 KB; - } - - /* Don't actually attach an empty dictionary. - */ - if (dictCtx->dictSize == 0) { - dictCtx = NULL; - } - } - workingStream->internal_donotuse.dictCtx = dictCtx; -} - - -static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize) -{ - assert(nextSize >= 0); - if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */ - /* rescale hash table */ - U32 const delta = LZ4_dict->currentOffset - 64 KB; - const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize; - int i; - DEBUGLOG(4, "LZ4_renormDictT"); - for (i=0; ihashTable[i] < delta) LZ4_dict->hashTable[i]=0; - else LZ4_dict->hashTable[i] -= delta; - } - LZ4_dict->currentOffset = 64 KB; - if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB; - LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize; - } -} - - -int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, - const char* source, char* dest, - int inputSize, int maxOutputSize, - int acceleration) -{ - const tableType_t tableType = byU32; - LZ4_stream_t_internal* const streamPtr = &LZ4_stream->internal_donotuse; - const char* dictEnd = streamPtr->dictSize ? 
(const char*)streamPtr->dictionary + streamPtr->dictSize : NULL; - - DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)", inputSize, streamPtr->dictSize); - - LZ4_renormDictT(streamPtr, inputSize); /* fix index overflow */ - if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT; - if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX; - - /* invalidate tiny dictionaries */ - if ( (streamPtr->dictSize < 4) /* tiny dictionary : not enough for a hash */ - && (dictEnd != source) /* prefix mode */ - && (inputSize > 0) /* tolerance : don't lose history, in case next invocation would use prefix mode */ - && (streamPtr->dictCtx == NULL) /* usingDictCtx */ - ) { - DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary); - /* remove dictionary existence from history, to employ faster prefix mode */ - streamPtr->dictSize = 0; - streamPtr->dictionary = (const BYTE*)source; - dictEnd = source; - } - - /* Check overlapping input/dictionary space */ - { const char* const sourceEnd = source + inputSize; - if ((sourceEnd > (const char*)streamPtr->dictionary) && (sourceEnd < dictEnd)) { - streamPtr->dictSize = (U32)(dictEnd - sourceEnd); - if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB; - if (streamPtr->dictSize < 4) streamPtr->dictSize = 0; - streamPtr->dictionary = (const BYTE*)dictEnd - streamPtr->dictSize; - } - } - - /* prefix mode : source data follows dictionary */ - if (dictEnd == source) { - if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) - return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration); - else - return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration); - } - - /* external dictionary mode */ - { int result; - if (streamPtr->dictCtx) { - /* We depend here on the fact that dictCtx'es (produced by - * LZ4_loadDict) guarantee that their tables contain no references - * to offsets between dictCtx->currentOffset - 64 KB and - * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe - * to use noDictIssue even when the dict isn't a full 64 KB. - */ - if (inputSize > 4 KB) { - /* For compressing large blobs, it is faster to pay the setup - * cost to copy the dictionary's tables into the active context, - * so that the compression loop is only looking into one table. 
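LZ4_compress_fast_continue() above is the streaming entry point: each block may reference up to 64 KB of previously compressed history, tracked in the LZ4_stream_t state. A minimal usage sketch, assuming the public lz4.h API; the block contents and sizes are illustrative, and LZ4_saveDict() is shown for the common case where the source buffer is reused between blocks:

#include <stdio.h>
#include <string.h>
#include "lz4.h"

/* Compress two dependent blocks with the streaming API defined above:
 * block 2 may reference data from block 1 through the stream state. */
int main(void)
{
    LZ4_stream_t* stream = LZ4_createStream();
    char dict[64 * 1024];                        /* last 64 KB of history */
    char out[2][LZ4_COMPRESSBOUND(64)];
    const char* blocks[2] = { "first block of the stream........",
                              "first block of the stream, again" };
    int i;

    if (stream == NULL) return 1;
    for (i = 0; i < 2; i++) {
        int srcSize = (int)strlen(blocks[i]) + 1;
        int cSize = LZ4_compress_fast_continue(stream, blocks[i], out[i],
                                               srcSize, (int)sizeof(out[i]), 1);
        if (cSize <= 0) return 1;
        printf("block %d: %d -> %d bytes\n", i, srcSize, cSize);
        /* Preserve the history before the source buffer changes. */
        LZ4_saveDict(stream, dict, (int)sizeof(dict));
    }
    LZ4_freeStream(stream);
    return 0;
}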
- */ - LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr)); - result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration); - } else { - result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration); - } - } else { /* small data <= 4 KB */ - if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) { - result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration); - } else { - result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration); - } - } - streamPtr->dictionary = (const BYTE*)source; - streamPtr->dictSize = (U32)inputSize; - return result; - } -} - - -/* Hidden debug function, to force-test external dictionary mode */ -int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize) -{ - LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse; - int result; - - LZ4_renormDictT(streamPtr, srcSize); - - if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) { - result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1); - } else { - result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1); - } - - streamPtr->dictionary = (const BYTE*)source; - streamPtr->dictSize = (U32)srcSize; - - return result; -} - - -/*! LZ4_saveDict() : - * If previously compressed data block is not guaranteed to remain available at its memory location, - * save it into a safer place (char* safeBuffer). - * Note : no need to call LZ4_loadDict() afterwards, dictionary is immediately usable, - * one can therefore call LZ4_compress_fast_continue() right after. - * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error. - */ -int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize) -{ - LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse; - - DEBUGLOG(5, "LZ4_saveDict : dictSize=%i, safeBuffer=%p", dictSize, safeBuffer); - - if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */ - if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; } - - if (safeBuffer == NULL) assert(dictSize == 0); - if (dictSize > 0) { - const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize; - assert(dict->dictionary); - LZ4_memmove(safeBuffer, previousDictEnd - dictSize, (size_t)dictSize); - } - - dict->dictionary = (const BYTE*)safeBuffer; - dict->dictSize = (U32)dictSize; - - return dictSize; -} - - - -/*-******************************* - * Decompression functions - ********************************/ - -typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive; - -#undef MIN -#define MIN(a,b) ( (a) < (b) ? 
(a) : (b) ) - - -/* variant for decompress_unsafe() - * does not know end of input - * presumes input is well formed - * note : will consume at least one byte */ -size_t read_long_length_no_check(const BYTE** pp) -{ - size_t b, l = 0; - do { b = **pp; (*pp)++; l += b; } while (b==255); - DEBUGLOG(6, "read_long_length_no_check: +length=%zu using %zu input bytes", l, l/255 + 1) - return l; -} - -/* core decoder variant for LZ4_decompress_fast*() - * for legacy support only : these entry points are deprecated. - * - Presumes input is correctly formed (no defense vs malformed inputs) - * - Does not know input size (presume input buffer is "large enough") - * - Decompress a full block (only) - * @return : nb of bytes read from input. - * Note : this variant is not optimized for speed, just for maintenance. - * the goal is to remove support of decompress_fast*() variants by v2.0 -**/ -LZ4_FORCE_INLINE int -LZ4_decompress_unsafe_generic( - const BYTE* const istart, - BYTE* const ostart, - int decompressedSize, - - size_t prefixSize, - const BYTE* const dictStart, /* only if dict==usingExtDict */ - const size_t dictSize /* note: =0 if dictStart==NULL */ - ) -{ - const BYTE* ip = istart; - BYTE* op = (BYTE*)ostart; - BYTE* const oend = ostart + decompressedSize; - const BYTE* const prefixStart = ostart - prefixSize; - - DEBUGLOG(5, "LZ4_decompress_unsafe_generic"); - if (dictStart == NULL) assert(dictSize == 0); - - while (1) { - /* start new sequence */ - unsigned token = *ip++; - - /* literals */ - { size_t ll = token >> ML_BITS; - if (ll==15) { - /* long literal length */ - ll += read_long_length_no_check(&ip); - } - if ((size_t)(oend-op) < ll) return -1; /* output buffer overflow */ - LZ4_memmove(op, ip, ll); /* support in-place decompression */ - op += ll; - ip += ll; - if ((size_t)(oend-op) < MFLIMIT) { - if (op==oend) break; /* end of block */ - DEBUGLOG(5, "invalid: literals end at distance %zi from end of block", oend-op); - /* incorrect end of block : - * last match must start at least MFLIMIT==12 bytes before end of output block */ - return -1; - } } - - /* match */ - { size_t ml = token & 15; - size_t const offset = LZ4_readLE16(ip); - ip+=2; - - if (ml==15) { - /* long literal length */ - ml += read_long_length_no_check(&ip); - } - ml += MINMATCH; - - if ((size_t)(oend-op) < ml) return -1; /* output buffer overflow */ - - { const BYTE* match = op - offset; - - /* out of range */ - if (offset > (size_t)(op - prefixStart) + dictSize) { - DEBUGLOG(6, "offset out of range"); - return -1; - } - - /* check special case : extDict */ - if (offset > (size_t)(op - prefixStart)) { - /* extDict scenario */ - const BYTE* const dictEnd = dictStart + dictSize; - const BYTE* extMatch = dictEnd - (offset - (size_t)(op-prefixStart)); - size_t const extml = (size_t)(dictEnd - extMatch); - if (extml > ml) { - /* match entirely within extDict */ - LZ4_memmove(op, extMatch, ml); - op += ml; - ml = 0; - } else { - /* match split between extDict & prefix */ - LZ4_memmove(op, extMatch, extml); - op += extml; - ml -= extml; - } - match = prefixStart; - } - - /* match copy - slow variant, supporting overlap copy */ - { size_t u; - for (u=0; u= ipmax before start of loop. Returns initial_error if so. - * @error (output) - error code. Must be set to 0 before call. 
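read_long_length_no_check() and read_variable_length() above decode LZ4's extended lengths: a length nibble of 15 is followed by bytes that are summed until one of them is not 255. A standalone sketch with the read-limit check but without the 32-bit accumulator-overflow check of the real reader; the helper name is illustrative:

#include <stdio.h>
#include <stddef.h>

/* Decode an LZ4 extended length: keep adding bytes while they equal 255.
 * Returns (size_t)-1 if the input is truncated. */
static size_t read_extended_length(const unsigned char** ip, const unsigned char* iend)
{
    size_t length = 0;
    unsigned char s;
    do {
        if (*ip >= iend) return (size_t)-1;   /* would read past the input */
        s = **ip;
        (*ip)++;
        length += s;
    } while (s == 255);
    return length;
}

int main(void)
{
    const unsigned char input[] = { 255, 255, 30, 0x42 };
    const unsigned char* ip = input;
    size_t extra = read_extended_length(&ip, input + sizeof(input));
    /* a length nibble of 15 plus this extension encodes 15 + 540 = 555 */
    printf("extension = %zu, next byte = 0x%02x\n", extra, *ip);
    return 0;
}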
-**/ -typedef size_t Rvl_t; -static const Rvl_t rvl_error = (Rvl_t)(-1); -LZ4_FORCE_INLINE Rvl_t -read_variable_length(const BYTE** ip, const BYTE* ilimit, - int initial_check) -{ - Rvl_t s, length = 0; - assert(ip != NULL); - assert(*ip != NULL); - assert(ilimit != NULL); - if (initial_check && unlikely((*ip) >= ilimit)) { /* read limit reached */ - return rvl_error; - } - do { - s = **ip; - (*ip)++; - length += s; - if (unlikely((*ip) > ilimit)) { /* read limit reached */ - return rvl_error; - } - /* accumulator overflow detection (32-bit mode only) */ - if ((sizeof(length)<8) && unlikely(length > ((Rvl_t)(-1)/2)) ) { - return rvl_error; - } - } while (s==255); - - return length; -} - -/*! LZ4_decompress_generic() : - * This generic decompression function covers all use cases. - * It shall be instantiated several times, using different sets of directives. - * Note that it is important for performance that this function really get inlined, - * in order to remove useless branches during compilation optimization. - */ -LZ4_FORCE_INLINE int -LZ4_decompress_generic( - const char* const src, - char* const dst, - int srcSize, - int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */ - - earlyEnd_directive partialDecoding, /* full, partial */ - dict_directive dict, /* noDict, withPrefix64k, usingExtDict */ - const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */ - const BYTE* const dictStart, /* only if dict==usingExtDict */ - const size_t dictSize /* note : = 0 if noDict */ - ) -{ - if ((src == NULL) || (outputSize < 0)) { return -1; } - - { const BYTE* ip = (const BYTE*) src; - const BYTE* const iend = ip + srcSize; - - BYTE* op = (BYTE*) dst; - BYTE* const oend = op + outputSize; - BYTE* cpy; - - const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize; - - const int checkOffset = (dictSize < (int)(64 KB)); - - - /* Set up the "end" pointers for the shortcut. */ - const BYTE* const shortiend = iend - 14 /*maxLL*/ - 2 /*offset*/; - const BYTE* const shortoend = oend - 14 /*maxLL*/ - 18 /*maxML*/; - - const BYTE* match; - size_t offset; - unsigned token; - size_t length; - - - DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize); - - /* Special cases */ - assert(lowPrefix <= op); - if (unlikely(outputSize==0)) { - /* Empty output buffer */ - if (partialDecoding) return 0; - return ((srcSize==1) && (*ip==0)) ? 0 : -1; - } - if (unlikely(srcSize==0)) { return -1; } - - /* LZ4_FAST_DEC_LOOP: - * designed for modern OoO performance cpus, - * where copying reliably 32-bytes is preferable to an unpredictable branch. - * note : fast loop may show a regression for some client arm chips. 
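The fast loop described above leans on wildcopy: copy in fixed-size chunks, accept writing a few bytes past the requested end, and keep enough margin (LASTLITERALS / FASTLOOP_SAFE_DISTANCE) that the overrun never leaves the output buffer, switching to an exact path near the end. A standalone sketch of that contract, with an illustrative helper name:

#include <stdio.h>
#include <string.h>

/* Copy `len` bytes in whole 8-byte chunks.  May write up to 7 bytes past
 * dst+len, so the destination must have that much slack -- the same
 * contract as LZ4_wildCopy8() above, and the reason the decoder switches
 * to an exact copy once it gets close to the end of the output buffer. */
static void wild_copy8(unsigned char* dst, const unsigned char* src, size_t len)
{
    unsigned char* const end = dst + len;
    do { memcpy(dst, src, 8); dst += 8; src += 8; } while (dst < end);
}

int main(void)
{
    unsigned char out[32];                        /* 13 useful bytes + slack */
    const unsigned char in[32] = "thirteen byte";
    wild_copy8(out, in, 13);                      /* actually writes 16 bytes */
    printf("%.13s\n", out);
    return 0;
}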
*/ -#if LZ4_FAST_DEC_LOOP - if ((oend - op) < FASTLOOP_SAFE_DISTANCE) { - DEBUGLOG(6, "skip fast decode loop"); - goto safe_decode; - } - - /* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */ - while (1) { - /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */ - assert(oend - op >= FASTLOOP_SAFE_DISTANCE); - assert(ip < iend); - token = *ip++; - length = token >> ML_BITS; /* literal length */ - - /* decode literal length */ - if (length == RUN_MASK) { - size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1); - if (addl == rvl_error) { goto _output_error; } - length += addl; - if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */ - if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */ - - /* copy literals */ - cpy = op+length; - LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH); - if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; } - LZ4_wildCopy32(op, ip, cpy); - ip += length; op = cpy; - } else { - cpy = op+length; - DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length); - /* We don't need to check oend, since we check it once for each loop below */ - if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; } - /* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */ - LZ4_memcpy(op, ip, 16); - ip += length; op = cpy; - } - - /* get offset */ - offset = LZ4_readLE16(ip); ip+=2; - match = op - offset; - assert(match <= op); /* overflow check */ - - /* get matchlength */ - length = token & ML_MASK; - - if (length == ML_MASK) { - size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0); - if (addl == rvl_error) { goto _output_error; } - length += addl; - length += MINMATCH; - if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */ - if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */ - if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) { - goto safe_match_copy; - } - } else { - length += MINMATCH; - if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) { - goto safe_match_copy; - } - - /* Fastpath check: skip LZ4_wildCopy32 when true */ - if ((dict == withPrefix64k) || (match >= lowPrefix)) { - if (offset >= 8) { - assert(match >= lowPrefix); - assert(match <= op); - assert(op + 18 <= oend); - - LZ4_memcpy(op, match, 8); - LZ4_memcpy(op+8, match+8, 8); - LZ4_memcpy(op+16, match+16, 2); - op += length; - continue; - } } } - - if (checkOffset && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */ - /* match starting within external dictionary */ - if ((dict==usingExtDict) && (match < lowPrefix)) { - assert(dictEnd != NULL); - if (unlikely(op+length > oend-LASTLITERALS)) { - if (partialDecoding) { - DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd"); - length = MIN(length, (size_t)(oend-op)); - } else { - goto _output_error; /* end-of-block condition violated */ - } } - - if (length <= (size_t)(lowPrefix-match)) { - /* match fits entirely within external dictionary : just copy */ - LZ4_memmove(op, dictEnd - (lowPrefix-match), length); - op += length; - } else { - /* match stretches into both external dictionary and current block */ - size_t const copySize = (size_t)(lowPrefix - match); - size_t const restSize = length - copySize; - LZ4_memcpy(op, dictEnd - 
copySize, copySize); - op += copySize; - if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */ - BYTE* const endOfMatch = op + restSize; - const BYTE* copyFrom = lowPrefix; - while (op < endOfMatch) { *op++ = *copyFrom++; } - } else { - LZ4_memcpy(op, lowPrefix, restSize); - op += restSize; - } } - continue; - } - - /* copy match within block */ - cpy = op + length; - - assert((op <= oend) && (oend-op >= 32)); - if (unlikely(offset<16)) { - LZ4_memcpy_using_offset(op, match, cpy, offset); - } else { - LZ4_wildCopy32(op, match, cpy); - } - - op = cpy; /* wildcopy correction */ - } - safe_decode: -#endif - - /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */ - while (1) { - assert(ip < iend); - token = *ip++; - length = token >> ML_BITS; /* literal length */ - - /* A two-stage shortcut for the most common case: - * 1) If the literal length is 0..14, and there is enough space, - * enter the shortcut and copy 16 bytes on behalf of the literals - * (in the fast mode, only 8 bytes can be safely copied this way). - * 2) Further if the match length is 4..18, copy 18 bytes in a similar - * manner; but we ensure that there's enough space in the output for - * those 18 bytes earlier, upon entering the shortcut (in other words, - * there is a combined check for both stages). - */ - if ( (length != RUN_MASK) - /* strictly "less than" on input, to re-enter the loop with at least one byte */ - && likely((ip < shortiend) & (op <= shortoend)) ) { - /* Copy the literals */ - LZ4_memcpy(op, ip, 16); - op += length; ip += length; - - /* The second stage: prepare for match copying, decode full info. - * If it doesn't work out, the info won't be wasted. */ - length = token & ML_MASK; /* match length */ - offset = LZ4_readLE16(ip); ip += 2; - match = op - offset; - assert(match <= op); /* check overflow */ - - /* Do not deal with overlapping matches. */ - if ( (length != ML_MASK) - && (offset >= 8) - && (dict==withPrefix64k || match >= lowPrefix) ) { - /* Copy the match. */ - LZ4_memcpy(op + 0, match + 0, 8); - LZ4_memcpy(op + 8, match + 8, 8); - LZ4_memcpy(op +16, match +16, 2); - op += length + MINMATCH; - /* Both stages worked, load the next token. */ - continue; - } - - /* The second stage didn't work out, but the info is ready. - * Propel it right to the point of match copying. */ - goto _copy_match; - } - - /* decode literal length */ - if (length == RUN_MASK) { - size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1); - if (addl == rvl_error) { goto _output_error; } - length += addl; - if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */ - if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */ - } - - /* copy literals */ - cpy = op+length; -#if LZ4_FAST_DEC_LOOP - safe_literal_copy: -#endif - LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH); - if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) { - /* We've either hit the input parsing restriction or the output parsing restriction. - * In the normal scenario, decoding a full block, it must be the last sequence, - * otherwise it's an error (invalid input or dimensions). - * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow. - */ - if (partialDecoding) { - /* Since we are partial decoding we may be in this block because of the output parsing - * restriction, which is not valid since the output buffer is allowed to be undersized. 
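The byte-by-byte loops above handle the case where a match overlaps the bytes being written (offset smaller than match length): copying front to back re-reads freshly written output, which is how LZ4 expresses run-length-like repetitions, and is precisely why memcpy() cannot be used there. A standalone sketch:

#include <stdio.h>
#include <string.h>

/* Expand a match that overlaps its own output (offset < length).
 * Copying front-to-back one byte at a time re-reads bytes that were just
 * written, which is exactly what a small-offset LZ4 match relies on.
 * memcpy() is not allowed on overlapping ranges. */
static void overlap_copy(unsigned char* op, size_t offset, size_t length)
{
    const unsigned char* match = op - offset;
    while (length--) *op++ = *match++;
}

int main(void)
{
    unsigned char out[32] = "abc";   /* already-decoded output so far */
    overlap_copy(out + 3, 3, 9);     /* offset 3, match length 9 */
    out[12] = '\0';
    printf("%s\n", out);             /* abcabcabcabc */
    return 0;
}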
- */ - DEBUGLOG(7, "partialDecoding: copying literals, close to input or output end") - DEBUGLOG(7, "partialDecoding: literal length = %u", (unsigned)length); - DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op)); - DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip)); - /* Finishing in the middle of a literals segment, - * due to lack of input. - */ - if (ip+length > iend) { - length = (size_t)(iend-ip); - cpy = op + length; - } - /* Finishing in the middle of a literals segment, - * due to lack of output space. - */ - if (cpy > oend) { - cpy = oend; - assert(op<=oend); - length = (size_t)(oend-op); - } - } else { - /* We must be on the last sequence (or invalid) because of the parsing limitations - * so check that we exactly consume the input and don't overrun the output buffer. - */ - if ((ip+length != iend) || (cpy > oend)) { - DEBUGLOG(6, "should have been last run of literals") - DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend); - DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend); - goto _output_error; - } - } - LZ4_memmove(op, ip, length); /* supports overlapping memory regions, for in-place decompression scenarios */ - ip += length; - op += length; - /* Necessarily EOF when !partialDecoding. - * When partialDecoding, it is EOF if we've either - * filled the output buffer or - * can't proceed with reading an offset for following match. - */ - if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) { - break; - } - } else { - LZ4_wildCopy8(op, ip, cpy); /* can overwrite up to 8 bytes beyond cpy */ - ip += length; op = cpy; - } - - /* get offset */ - offset = LZ4_readLE16(ip); ip+=2; - match = op - offset; - - /* get matchlength */ - length = token & ML_MASK; - - _copy_match: - if (length == ML_MASK) { - size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0); - if (addl == rvl_error) { goto _output_error; } - length += addl; - if (unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */ - } - length += MINMATCH; - -#if LZ4_FAST_DEC_LOOP - safe_match_copy: -#endif - if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */ - /* match starting within external dictionary */ - if ((dict==usingExtDict) && (match < lowPrefix)) { - assert(dictEnd != NULL); - if (unlikely(op+length > oend-LASTLITERALS)) { - if (partialDecoding) length = MIN(length, (size_t)(oend-op)); - else goto _output_error; /* doesn't respect parsing restriction */ - } - - if (length <= (size_t)(lowPrefix-match)) { - /* match fits entirely within external dictionary : just copy */ - LZ4_memmove(op, dictEnd - (lowPrefix-match), length); - op += length; - } else { - /* match stretches into both external dictionary and current block */ - size_t const copySize = (size_t)(lowPrefix - match); - size_t const restSize = length - copySize; - LZ4_memcpy(op, dictEnd - copySize, copySize); - op += copySize; - if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */ - BYTE* const endOfMatch = op + restSize; - const BYTE* copyFrom = lowPrefix; - while (op < endOfMatch) *op++ = *copyFrom++; - } else { - LZ4_memcpy(op, lowPrefix, restSize); - op += restSize; - } } - continue; - } - assert(match >= lowPrefix); - - /* copy match within block */ - cpy = op + length; - - /* partialDecoding : may end anywhere within the block */ - assert(op<=oend); - if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) { - size_t 
const mlen = MIN(length, (size_t)(oend-op)); - const BYTE* const matchEnd = match + mlen; - BYTE* const copyEnd = op + mlen; - if (matchEnd > op) { /* overlap copy */ - while (op < copyEnd) { *op++ = *match++; } - } else { - LZ4_memcpy(op, match, mlen); - } - op = copyEnd; - if (op == oend) { break; } - continue; - } - - if (unlikely(offset<8)) { - LZ4_write32(op, 0); /* silence msan warning when offset==0 */ - op[0] = match[0]; - op[1] = match[1]; - op[2] = match[2]; - op[3] = match[3]; - match += inc32table[offset]; - LZ4_memcpy(op+4, match, 4); - match -= dec64table[offset]; - } else { - LZ4_memcpy(op, match, 8); - match += 8; - } - op += 8; - - if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) { - BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1); - if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */ - if (op < oCopyLimit) { - LZ4_wildCopy8(op, match, oCopyLimit); - match += oCopyLimit - op; - op = oCopyLimit; - } - while (op < cpy) { *op++ = *match++; } - } else { - LZ4_memcpy(op, match, 8); - if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); } - } - op = cpy; /* wildcopy correction */ - } - - /* end of decoding */ - DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst)); - return (int) (((char*)op)-dst); /* Nb of output bytes decoded */ - - /* Overflow error detected */ - _output_error: - return (int) (-(((const char*)ip)-src))-1; - } -} - - -/*===== Instantiate the API decoding functions. =====*/ - -LZ4_FORCE_O2 -int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize) -{ - return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, - decode_full_block, noDict, - (BYTE*)dest, NULL, 0); -} - -LZ4_FORCE_O2 -int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity) -{ - dstCapacity = MIN(targetOutputSize, dstCapacity); - return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity, - partial_decode, - noDict, (BYTE*)dst, NULL, 0); -} - -LZ4_FORCE_O2 -int LZ4_decompress_fast(const char* source, char* dest, int originalSize) -{ - DEBUGLOG(5, "LZ4_decompress_fast"); - return LZ4_decompress_unsafe_generic( - (const BYTE*)source, (BYTE*)dest, originalSize, - 0, NULL, 0); -} - -/*===== Instantiate a few more decoding cases, used more than once. =====*/ - -LZ4_FORCE_O2 /* Exported, an obsolete API function. */ -int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize) -{ - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, - decode_full_block, withPrefix64k, - (BYTE*)dest - 64 KB, NULL, 0); -} - -LZ4_FORCE_O2 -static int LZ4_decompress_safe_partial_withPrefix64k(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity) -{ - dstCapacity = MIN(targetOutputSize, dstCapacity); - return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity, - partial_decode, withPrefix64k, - (BYTE*)dest - 64 KB, NULL, 0); -} - -/* Another obsolete API function, paired with the previous one. 
*/ -int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize) -{ - return LZ4_decompress_unsafe_generic( - (const BYTE*)source, (BYTE*)dest, originalSize, - 64 KB, NULL, 0); -} - -LZ4_FORCE_O2 -static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize, - size_t prefixSize) -{ - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, - decode_full_block, noDict, - (BYTE*)dest-prefixSize, NULL, 0); -} - -LZ4_FORCE_O2 -static int LZ4_decompress_safe_partial_withSmallPrefix(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, - size_t prefixSize) -{ - dstCapacity = MIN(targetOutputSize, dstCapacity); - return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity, - partial_decode, noDict, - (BYTE*)dest-prefixSize, NULL, 0); -} - -LZ4_FORCE_O2 -int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, - int compressedSize, int maxOutputSize, - const void* dictStart, size_t dictSize) -{ - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, - decode_full_block, usingExtDict, - (BYTE*)dest, (const BYTE*)dictStart, dictSize); -} - -LZ4_FORCE_O2 -int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest, - int compressedSize, int targetOutputSize, int dstCapacity, - const void* dictStart, size_t dictSize) -{ - dstCapacity = MIN(targetOutputSize, dstCapacity); - return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity, - partial_decode, usingExtDict, - (BYTE*)dest, (const BYTE*)dictStart, dictSize); -} - -LZ4_FORCE_O2 -static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize, - const void* dictStart, size_t dictSize) -{ - return LZ4_decompress_unsafe_generic( - (const BYTE*)source, (BYTE*)dest, originalSize, - 0, (const BYTE*)dictStart, dictSize); -} - -/* The "double dictionary" mode, for use with e.g. ring buffers: the first part - * of the dictionary is passed as prefix, and the second via dictStart + dictSize. - * These routines are used only once, in LZ4_decompress_*_continue(). - */ -LZ4_FORCE_INLINE -int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize, - size_t prefixSize, const void* dictStart, size_t dictSize) -{ - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, - decode_full_block, usingExtDict, - (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize); -} - -/*===== streaming decompression functions =====*/ - -#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) -LZ4_streamDecode_t* LZ4_createStreamDecode(void) -{ - LZ4_STATIC_ASSERT(sizeof(LZ4_streamDecode_t) >= sizeof(LZ4_streamDecode_t_internal)); - return (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t)); -} - -int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream) -{ - if (LZ4_stream == NULL) { return 0; } /* support free on NULL */ - FREEMEM(LZ4_stream); - return 0; -} -#endif - -/*! LZ4_setStreamDecode() : - * Use this function to instruct where to find the dictionary. - * This function is not necessary if previous data is still available where it was decoded. - * Loading a size of 0 is allowed (same effect as no dictionary). 
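For reference, the exported one-shot decoders above (LZ4_decompress_safe(), LZ4_decompress_safe_partial() and their prefix/extDict variants) are thin wrappers over LZ4_decompress_generic(). A minimal usage sketch follows, assuming the caller received the compressed block plus an out-of-band bound on the decompressed size; the helper and buffer names are illustrative, not part of lz4.

/* Sketch only: one-shot decompression of a single LZ4 block. */
#include <stdio.h>
#include <stdlib.h>
#include "lz4.h"

int decompress_block(const char* comp, int compSize, int maxDecompressedSize)
{
    char* const out = (char*)malloc((size_t)maxDecompressedSize);
    if (out == NULL) return -1;

    /* LZ4_decompress_safe() never writes outside `out` and returns a
     * negative value if the block is malformed or `out` is too small. */
    int const decSize = LZ4_decompress_safe(comp, out, compSize, maxDecompressedSize);
    if (decSize < 0) {
        fprintf(stderr, "corrupt or truncated block (%d)\n", decSize);
        free(out);
        return -1;
    }
    /* ... use out[0..decSize) here ... */
    free(out);
    return decSize;
}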
- * @return : 1 if OK, 0 if error - */ -int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize) -{ - LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; - lz4sd->prefixSize = (size_t)dictSize; - if (dictSize) { - assert(dictionary != NULL); - lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize; - } else { - lz4sd->prefixEnd = (const BYTE*) dictionary; - } - lz4sd->externalDict = NULL; - lz4sd->extDictSize = 0; - return 1; -} - -/*! LZ4_decoderRingBufferSize() : - * when setting a ring buffer for streaming decompression (optional scenario), - * provides the minimum size of this ring buffer - * to be compatible with any source respecting maxBlockSize condition. - * Note : in a ring buffer scenario, - * blocks are presumed decompressed next to each other. - * When not enough space remains for next block (remainingSize < maxBlockSize), - * decoding resumes from beginning of ring buffer. - * @return : minimum ring buffer size, - * or 0 if there is an error (invalid maxBlockSize). - */ -int LZ4_decoderRingBufferSize(int maxBlockSize) -{ - if (maxBlockSize < 0) return 0; - if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0; - if (maxBlockSize < 16) maxBlockSize = 16; - return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize); -} - -/* -*_continue() : - These decoding functions allow decompression of multiple blocks in "streaming" mode. - Previously decoded blocks must still be available at the memory position where they were decoded. - If it's not possible, save the relevant part of decoded data into a safe buffer, - and indicate where it stands using LZ4_setStreamDecode() -*/ -LZ4_FORCE_O2 -int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize) -{ - LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; - int result; - - if (lz4sd->prefixSize == 0) { - /* The first call, no dictionary yet. */ - assert(lz4sd->extDictSize == 0); - result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize); - if (result <= 0) return result; - lz4sd->prefixSize = (size_t)result; - lz4sd->prefixEnd = (BYTE*)dest + result; - } else if (lz4sd->prefixEnd == (BYTE*)dest) { - /* They're rolling the current segment. */ - if (lz4sd->prefixSize >= 64 KB - 1) - result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize); - else if (lz4sd->extDictSize == 0) - result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, - lz4sd->prefixSize); - else - result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize, - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); - if (result <= 0) return result; - lz4sd->prefixSize += (size_t)result; - lz4sd->prefixEnd += result; - } else { - /* The buffer wraps around, or they're switching to another buffer. 
*/ - lz4sd->extDictSize = lz4sd->prefixSize; - lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; - result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, - lz4sd->externalDict, lz4sd->extDictSize); - if (result <= 0) return result; - lz4sd->prefixSize = (size_t)result; - lz4sd->prefixEnd = (BYTE*)dest + result; - } - - return result; -} - -LZ4_FORCE_O2 int -LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, - const char* source, char* dest, int originalSize) -{ - LZ4_streamDecode_t_internal* const lz4sd = - (assert(LZ4_streamDecode!=NULL), &LZ4_streamDecode->internal_donotuse); - int result; - - DEBUGLOG(5, "LZ4_decompress_fast_continue (toDecodeSize=%i)", originalSize); - assert(originalSize >= 0); - - if (lz4sd->prefixSize == 0) { - DEBUGLOG(5, "first invocation : no prefix nor extDict"); - assert(lz4sd->extDictSize == 0); - result = LZ4_decompress_fast(source, dest, originalSize); - if (result <= 0) return result; - lz4sd->prefixSize = (size_t)originalSize; - lz4sd->prefixEnd = (BYTE*)dest + originalSize; - } else if (lz4sd->prefixEnd == (BYTE*)dest) { - DEBUGLOG(5, "continue using existing prefix"); - result = LZ4_decompress_unsafe_generic( - (const BYTE*)source, (BYTE*)dest, originalSize, - lz4sd->prefixSize, - lz4sd->externalDict, lz4sd->extDictSize); - if (result <= 0) return result; - lz4sd->prefixSize += (size_t)originalSize; - lz4sd->prefixEnd += originalSize; - } else { - DEBUGLOG(5, "prefix becomes extDict"); - lz4sd->extDictSize = lz4sd->prefixSize; - lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; - result = LZ4_decompress_fast_extDict(source, dest, originalSize, - lz4sd->externalDict, lz4sd->extDictSize); - if (result <= 0) return result; - lz4sd->prefixSize = (size_t)originalSize; - lz4sd->prefixEnd = (BYTE*)dest + originalSize; - } - - return result; -} - - -/* -Advanced decoding functions : -*_usingDict() : - These decoding functions work the same as "_continue" ones, - the dictionary must be explicitly provided within parameters -*/ - -int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize) -{ - if (dictSize==0) - return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize); - if (dictStart+dictSize == dest) { - if (dictSize >= 64 KB - 1) { - return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize); - } - assert(dictSize >= 0); - return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize); - } - assert(dictSize >= 0); - return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize); -} - -int LZ4_decompress_safe_partial_usingDict(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, const char* dictStart, int dictSize) -{ - if (dictSize==0) - return LZ4_decompress_safe_partial(source, dest, compressedSize, targetOutputSize, dstCapacity); - if (dictStart+dictSize == dest) { - if (dictSize >= 64 KB - 1) { - return LZ4_decompress_safe_partial_withPrefix64k(source, dest, compressedSize, targetOutputSize, dstCapacity); - } - assert(dictSize >= 0); - return LZ4_decompress_safe_partial_withSmallPrefix(source, dest, compressedSize, targetOutputSize, dstCapacity, (size_t)dictSize); - } - assert(dictSize >= 0); - return LZ4_decompress_safe_partial_forceExtDict(source, dest, compressedSize, targetOutputSize, dstCapacity, dictStart, (size_t)dictSize); -} - 
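The *_usingDict() helpers above pick the prefix or the extDict decoder depending on whether the dictionary is contiguous with the destination. A minimal sketch, assuming `dictBuf`/`dictSize` hold exactly the dictionary the compressor loaded with LZ4_loadDict() (names are illustrative):

#include "lz4.h"

/* Sketch only: decode one block that was compressed against a static dictionary. */
int decode_with_dict(const char* comp, int compSize,
                     char* dst, int dstCapacity,
                     const char* dictBuf, int dictSize)
{
    /* Dispatches internally to the extDict path, or to the faster
     * prefix path when dictBuf + dictSize == dst. */
    return LZ4_decompress_safe_usingDict(comp, dst, compSize, dstCapacity,
                                         dictBuf, dictSize);
}

As the header notes in its performance tip, decoding is fastest when `dst` immediately follows the dictionary in memory, i.e. when dst == dictBuf + dictSize.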
-int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize) -{ - if (dictSize==0 || dictStart+dictSize == dest) - return LZ4_decompress_unsafe_generic( - (const BYTE*)source, (BYTE*)dest, originalSize, - (size_t)dictSize, NULL, 0); - assert(dictSize >= 0); - return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize); -} - - -/*=************************************************* -* Obsolete Functions -***************************************************/ -/* obsolete compression functions */ -int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) -{ - return LZ4_compress_default(source, dest, inputSize, maxOutputSize); -} -int LZ4_compress(const char* src, char* dest, int srcSize) -{ - return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize)); -} -int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) -{ - return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); -} -int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) -{ - return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); -} -int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity) -{ - return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1); -} -int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) -{ - return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); -} - -/* -These decompression functions are deprecated and should no longer be used. -They are only provided here for compatibility with older user programs. -- LZ4_uncompress is totally equivalent to LZ4_decompress_fast -- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe -*/ -int LZ4_uncompress (const char* source, char* dest, int outputSize) -{ - return LZ4_decompress_fast(source, dest, outputSize); -} -int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) -{ - return LZ4_decompress_safe(source, dest, isize, maxOutputSize); -} - -/* Obsolete Streaming functions */ - -int LZ4_sizeofStreamState(void) { return sizeof(LZ4_stream_t); } - -int LZ4_resetStreamState(void* state, char* inputBuffer) -{ - (void)inputBuffer; - LZ4_resetStream((LZ4_stream_t*)state); - return 0; -} - -#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) -void* LZ4_create (char* inputBuffer) -{ - (void)inputBuffer; - return LZ4_createStream(); -} -#endif - -char* LZ4_slideInputBuffer (void* state) -{ - /* avoid const char * -> char * conversion warning */ - return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary; -} - -#endif /* LZ4_COMMONDEFS_ONLY */ diff --git a/librocksdb-sys/lz4/lib/lz4.h b/librocksdb-sys/lz4/lib/lz4.h deleted file mode 100644 index 491c608..0000000 --- a/librocksdb-sys/lz4/lib/lz4.h +++ /dev/null @@ -1,842 +0,0 @@ -/* - * LZ4 - Fast LZ compression algorithm - * Header File - * Copyright (C) 2011-2020, Yann Collet. 
- - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 homepage : http://www.lz4.org - - LZ4 source repository : https://github.com/lz4/lz4 -*/ -#if defined (__cplusplus) -extern "C" { -#endif - -#ifndef LZ4_H_2983827168210 -#define LZ4_H_2983827168210 - -/* --- Dependency --- */ -#include <stddef.h> /* size_t */ - - -/** - Introduction - - LZ4 is lossless compression algorithm, providing compression speed >500 MB/s per core, - scalable with multi-cores CPU. It features an extremely fast decoder, with speed in - multiple GB/s per core, typically reaching RAM speed limits on multi-core systems. - - The LZ4 compression library provides in-memory compression and decompression functions. - It gives full buffer control to user. - Compression can be done in: - - a single step (described as Simple Functions) - - a single step, reusing a context (described in Advanced Functions) - - unbounded multiple steps (described as Streaming compression) - - lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md). - Decompressing such a compressed block requires additional metadata. - Exact metadata depends on exact decompression function. - For the typical case of LZ4_decompress_safe(), - metadata includes block's compressed size, and maximum bound of decompressed size. - Each application is free to encode and pass such metadata in whichever way it wants. - - lz4.h only handle blocks, it can not generate Frames. - - Blocks are different from Frames (doc/lz4_Frame_format.md). - Frames bundle both blocks and metadata in a specified manner. - Embedding metadata is required for compressed data to be self-contained and portable. - Frame format is delivered through a companion API, declared in lz4frame.h. - The `lz4` CLI can only manage frames. -*/ - -/*^*************************************************************** -* Export parameters -*****************************************************************/ -/* -* LZ4_DLL_EXPORT : -* Enable exporting of functions when building a Windows DLL -* LZ4LIB_VISIBILITY : -* Control library symbols visibility. 
-*/ -#ifndef LZ4LIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define LZ4LIB_VISIBILITY __attribute__ ((visibility ("default"))) -# else -# define LZ4LIB_VISIBILITY -# endif -#endif -#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1) -# define LZ4LIB_API __declspec(dllexport) LZ4LIB_VISIBILITY -#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1) -# define LZ4LIB_API __declspec(dllimport) LZ4LIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ -#else -# define LZ4LIB_API LZ4LIB_VISIBILITY -#endif - -/*! LZ4_FREESTANDING : - * When this macro is set to 1, it enables "freestanding mode" that is - * suitable for typical freestanding environment which doesn't support - * standard C library. - * - * - LZ4_FREESTANDING is a compile-time switch. - * - It requires the following macros to be defined: - * LZ4_memcpy, LZ4_memmove, LZ4_memset. - * - It only enables LZ4/HC functions which don't use heap. - * All LZ4F_* functions are not supported. - * - See tests/freestanding.c to check its basic setup. - */ -#if defined(LZ4_FREESTANDING) && (LZ4_FREESTANDING == 1) -# define LZ4_HEAPMODE 0 -# define LZ4HC_HEAPMODE 0 -# define LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION 1 -# if !defined(LZ4_memcpy) -# error "LZ4_FREESTANDING requires macro 'LZ4_memcpy'." -# endif -# if !defined(LZ4_memset) -# error "LZ4_FREESTANDING requires macro 'LZ4_memset'." -# endif -# if !defined(LZ4_memmove) -# error "LZ4_FREESTANDING requires macro 'LZ4_memmove'." -# endif -#elif ! defined(LZ4_FREESTANDING) -# define LZ4_FREESTANDING 0 -#endif - - -/*------ Version ------*/ -#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */ -#define LZ4_VERSION_MINOR 9 /* for new (non-breaking) interface capabilities */ -#define LZ4_VERSION_RELEASE 4 /* for tweaks, bug-fixes, or development */ - -#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE) - -#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE -#define LZ4_QUOTE(str) #str -#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str) -#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION) /* requires v1.7.3+ */ - -LZ4LIB_API int LZ4_versionNumber (void); /**< library version number; useful to check dll version; requires v1.3.0+ */ -LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; useful to check dll version; requires v1.7.5+ */ - - -/*-************************************ -* Tuning parameter -**************************************/ -#define LZ4_MEMORY_USAGE_MIN 10 -#define LZ4_MEMORY_USAGE_DEFAULT 14 -#define LZ4_MEMORY_USAGE_MAX 20 - -/*! - * LZ4_MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; ) - * Increasing memory usage improves compression ratio, at the cost of speed. - * Reduced memory usage may improve speed at the cost of ratio, thanks to better cache locality. - * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache - */ -#ifndef LZ4_MEMORY_USAGE -# define LZ4_MEMORY_USAGE LZ4_MEMORY_USAGE_DEFAULT -#endif - -#if (LZ4_MEMORY_USAGE < LZ4_MEMORY_USAGE_MIN) -# error "LZ4_MEMORY_USAGE is too small !" -#endif - -#if (LZ4_MEMORY_USAGE > LZ4_MEMORY_USAGE_MAX) -# error "LZ4_MEMORY_USAGE is too large !" -#endif - -/*-************************************ -* Simple Functions -**************************************/ -/*! 
LZ4_compress_default() : - * Compresses 'srcSize' bytes from buffer 'src' - * into already allocated 'dst' buffer of size 'dstCapacity'. - * Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize). - * It also runs faster, so it's a recommended setting. - * If the function cannot compress 'src' into a more limited 'dst' budget, - * compression stops *immediately*, and the function result is zero. - * In which case, 'dst' content is undefined (invalid). - * srcSize : max supported value is LZ4_MAX_INPUT_SIZE. - * dstCapacity : size of buffer 'dst' (which must be already allocated) - * @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity) - * or 0 if compression fails - * Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor read outside 'source' buffer). - */ -LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity); - -/*! LZ4_decompress_safe() : - * compressedSize : is the exact complete size of the compressed block. - * dstCapacity : is the size of destination buffer (which must be already allocated), presumed an upper bound of decompressed size. - * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity) - * If destination buffer is not large enough, decoding will stop and output an error code (negative value). - * If the source stream is detected malformed, the function will stop decoding and return a negative result. - * Note 1 : This function is protected against malicious data packets : - * it will never writes outside 'dst' buffer, nor read outside 'source' buffer, - * even if the compressed block is maliciously modified to order the decoder to do these actions. - * In such case, the decoder stops immediately, and considers the compressed block malformed. - * Note 2 : compressedSize and dstCapacity must be provided to the function, the compressed block does not contain them. - * The implementation is free to send / store / derive this information in whichever way is most beneficial. - * If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead. - */ -LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity); - - -/*-************************************ -* Advanced Functions -**************************************/ -#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ -#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16) - -/*! LZ4_compressBound() : - Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible) - This function is primarily useful for memory allocation purposes (destination buffer size). - Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example). - Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize) - inputSize : max supported value is LZ4_MAX_INPUT_SIZE - return : maximum output size in a "worst case" scenario - or 0, if input size is incorrect (too large or negative) -*/ -LZ4LIB_API int LZ4_compressBound(int inputSize); - -/*! LZ4_compress_fast() : - Same as LZ4_compress_default(), but allows selection of "acceleration" factor. 
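Before the acceleration variants below, here is a minimal sketch of the basic LZ4_compressBound()/LZ4_compress_default() pairing documented above; the helper name and the malloc-and-return ownership convention are assumptions for illustration only.

#include <stdlib.h>
#include "lz4.h"

/* Sketch only: compress `src` into a freshly allocated buffer sized with
 * LZ4_compressBound(), so compression cannot fail for lack of space. */
char* compress_block(const char* src, int srcSize, int* compSizeOut)
{
    int const bound = LZ4_compressBound(srcSize);     /* 0 if srcSize is invalid */
    if (bound == 0) return NULL;

    char* const dst = (char*)malloc((size_t)bound);
    if (dst == NULL) return NULL;

    int const compSize = LZ4_compress_default(src, dst, srcSize, bound);
    if (compSize <= 0) { free(dst); return NULL; }    /* not expected with `bound` capacity */

    *compSizeOut = compSize;
    return dst;   /* caller frees; compressed size must travel with the data */
}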
- The larger the acceleration value, the faster the algorithm, but also the lesser the compression. - It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed. - An acceleration value of "1" is the same as regular LZ4_compress_default() - Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c). - Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c). -*/ -LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); - - -/*! LZ4_compress_fast_extState() : - * Same as LZ4_compress_fast(), using an externally allocated memory space for its state. - * Use LZ4_sizeofState() to know how much memory must be allocated, - * and allocate it on 8-bytes boundaries (using `malloc()` typically). - * Then, provide this buffer as `void* state` to compression function. - */ -LZ4LIB_API int LZ4_sizeofState(void); -LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); - - -/*! LZ4_compress_destSize() : - * Reverse the logic : compresses as much data as possible from 'src' buffer - * into already allocated buffer 'dst', of size >= 'targetDestSize'. - * This function either compresses the entire 'src' content into 'dst' if it's large enough, - * or fill 'dst' buffer completely with as much data as possible from 'src'. - * note: acceleration parameter is fixed to "default". - * - * *srcSizePtr : will be modified to indicate how many bytes where read from 'src' to fill 'dst'. - * New value is necessarily <= input value. - * @return : Nb bytes written into 'dst' (necessarily <= targetDestSize) - * or 0 if compression fails. - * - * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed un v1.9.2+): - * the produced compressed content could, in specific circumstances, - * require to be decompressed into a destination buffer larger - * by at least 1 byte than the content to decompress. - * If an application uses `LZ4_compress_destSize()`, - * it's highly recommended to update liblz4 to v1.9.2 or better. - * If this can't be done or ensured, - * the receiving decompression function should provide - * a dstCapacity which is > decompressedSize, by at least 1 byte. - * See https://github.com/lz4/lz4/issues/859 for details - */ -LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize); - - -/*! LZ4_decompress_safe_partial() : - * Decompress an LZ4 compressed block, of size 'srcSize' at position 'src', - * into destination buffer 'dst' of size 'dstCapacity'. - * Up to 'targetOutputSize' bytes will be decoded. - * The function stops decoding on reaching this objective. - * This can be useful to boost performance - * whenever only the beginning of a block is required. - * - * @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize) - * If source stream is detected malformed, function returns a negative result. - * - * Note 1 : @return can be < targetOutputSize, if compressed block contains less data. - * - * Note 2 : targetOutputSize must be <= dstCapacity - * - * Note 3 : this function effectively stops decoding on reaching targetOutputSize, - * so dstCapacity is kind of redundant. - * This is because in older versions of this function, - * decoding operation would still write complete sequences. 
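Returning to LZ4_compress_destSize(), documented a few lines above: the sketch below fills a fixed-size packet with as much input as fits. The helper name, `packet`, and the "send the remainder later" convention are illustrative assumptions.

#include "lz4.h"

/* Sketch only: pack as much of `src` as possible into a fixed-capacity packet. */
int fill_packet(const char* src, int srcSize, char* packet, int packetCapacity,
                int* srcConsumed)
{
    int srcRead = srcSize;   /* in: bytes available; out: bytes actually consumed */
    int const written = LZ4_compress_destSize(src, packet, &srcRead, packetCapacity);
    if (written == 0) return 0;          /* compression error */
    *srcConsumed = srcRead;              /* caller resumes from src + srcRead */
    return written;                      /* bytes of `packet` actually used */
}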
- * Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize, - * it could write more bytes, though only up to dstCapacity. - * Some "margin" used to be required for this operation to work properly. - * Thankfully, this is no longer necessary. - * The function nonetheless keeps the same signature, in an effort to preserve API compatibility. - * - * Note 4 : If srcSize is the exact size of the block, - * then targetOutputSize can be any value, - * including larger than the block's decompressed size. - * The function will, at most, generate block's decompressed size. - * - * Note 5 : If srcSize is _larger_ than block's compressed size, - * then targetOutputSize **MUST** be <= block's decompressed size. - * Otherwise, *silent corruption will occur*. - */ -LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity); - - -/*-********************************************* -* Streaming Compression Functions -***********************************************/ -typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */ - -/** - Note about RC_INVOKED - - - RC_INVOKED is predefined symbol of rc.exe (the resource compiler which is part of MSVC/Visual Studio). - https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros - - - Since rc.exe is a legacy compiler, it truncates long symbol (> 30 chars) - and reports warning "RC4011: identifier truncated". - - - To eliminate the warning, we surround long preprocessor symbol with - "#if !defined(RC_INVOKED) ... #endif" block that means - "skip this block when rc.exe is trying to read it". -*/ -#if !defined(RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */ -#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) -LZ4LIB_API LZ4_stream_t* LZ4_createStream(void); -LZ4LIB_API int LZ4_freeStream (LZ4_stream_t* streamPtr); -#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */ -#endif - -/*! LZ4_resetStream_fast() : v1.9.0+ - * Use this to prepare an LZ4_stream_t for a new chain of dependent blocks - * (e.g., LZ4_compress_fast_continue()). - * - * An LZ4_stream_t must be initialized once before usage. - * This is automatically done when created by LZ4_createStream(). - * However, should the LZ4_stream_t be simply declared on stack (for example), - * it's necessary to initialize it first, using LZ4_initStream(). - * - * After init, start any new stream with LZ4_resetStream_fast(). - * A same LZ4_stream_t can be re-used multiple times consecutively - * and compress multiple streams, - * provided that it starts each new stream with LZ4_resetStream_fast(). - * - * LZ4_resetStream_fast() is much faster than LZ4_initStream(), - * but is not compatible with memory regions containing garbage data. - * - * Note: it's only useful to call LZ4_resetStream_fast() - * in the context of streaming compression. - * The *extState* functions perform their own resets. - * Invoking LZ4_resetStream_fast() before is redundant, and even counterproductive. - */ -LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr); - -/*! LZ4_loadDict() : - * Use this function to reference a static dictionary into LZ4_stream_t. - * The dictionary must remain available during compression. - * LZ4_loadDict() triggers a reset, so any previous data will be forgotten. - * The same dictionary will have to be loaded on decompression side for successful decoding. 
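As a usage sketch for LZ4_decompress_safe_partial(), declared just above: decoding only the first few bytes of a block, for example to inspect a record header without paying for full decompression. The helper name and the fixed `headerSize` are assumptions for illustration.

#include "lz4.h"

/* Sketch only: decode just the first `headerSize` bytes of a block. */
int peek_header(const char* comp, int compSize, char* hdr, int headerSize)
{
    /* targetOutputSize <= dstCapacity; here they are equal.  Returns the
     * number of bytes actually written (possibly < headerSize if the block
     * decodes to less data), or a negative error code. */
    return LZ4_decompress_safe_partial(comp, hdr, compSize, headerSize, headerSize);
}

Because `compSize` here is the exact block size, `headerSize` may be smaller than the block's decompressed size without risk (see Note 4 above).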
- * Dictionary are useful for better compression of small data (KB range). - * While LZ4 accept any input as dictionary, - * results are generally better when using Zstandard's Dictionary Builder. - * Loading a size of 0 is allowed, and is the same as reset. - * @return : loaded dictionary size, in bytes (necessarily <= 64 KB) - */ -LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize); - -/*! LZ4_compress_fast_continue() : - * Compress 'src' content using data from previously compressed blocks, for better compression ratio. - * 'dst' buffer must be already allocated. - * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster. - * - * @return : size of compressed block - * or 0 if there is an error (typically, cannot fit into 'dst'). - * - * Note 1 : Each invocation to LZ4_compress_fast_continue() generates a new block. - * Each block has precise boundaries. - * Each block must be decompressed separately, calling LZ4_decompress_*() with relevant metadata. - * It's not possible to append blocks together and expect a single invocation of LZ4_decompress_*() to decompress them together. - * - * Note 2 : The previous 64KB of source data is __assumed__ to remain present, unmodified, at same address in memory ! - * - * Note 3 : When input is structured as a double-buffer, each buffer can have any size, including < 64 KB. - * Make sure that buffers are separated, by at least one byte. - * This construction ensures that each block only depends on previous block. - * - * Note 4 : If input buffer is a ring-buffer, it can have any size, including < 64 KB. - * - * Note 5 : After an error, the stream status is undefined (invalid), it can only be reset or freed. - */ -LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); - -/*! LZ4_saveDict() : - * If last 64KB data cannot be guaranteed to remain available at its current memory location, - * save it into a safer place (char* safeBuffer). - * This is schematically equivalent to a memcpy() followed by LZ4_loadDict(), - * but is much faster, because LZ4_saveDict() doesn't need to rebuild tables. - * @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error. - */ -LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int maxDictSize); - - -/*-********************************************** -* Streaming Decompression Functions -* Bufferless synchronous API -************************************************/ -typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* tracking context */ - -/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() : - * creation / destruction of streaming decompression tracking context. - * A tracking context can be re-used multiple times. - */ -#if !defined(RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros */ -#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) -LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void); -LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream); -#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */ -#endif - -/*! LZ4_setStreamDecode() : - * An LZ4_streamDecode_t context can be allocated once and re-used multiple times. - * Use this function to start decompression of a new stream of blocks. - * A dictionary can optionally be set. Use NULL or size 0 for a reset order. 
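The streaming-compression entry points above (LZ4_loadDict(), LZ4_compress_fast_continue(), LZ4_saveDict()) combine roughly as in the sketch below. Chunk handling, sizes and the single contiguous output buffer are illustrative assumptions, and each block's size still has to be stored for the decoder.

#include "lz4.h"

/* Sketch only: chained compression of consecutive chunks so that each
 * block can reference up to 64 KB of previous history. */
int compress_chunks(const char* dict, int dictSize,
                    const char* chunks[], const int sizes[], int nbChunks,
                    char* dst, int dstCapacity)
{
    LZ4_stream_t* const s = LZ4_createStream();
    if (s == NULL) return -1;

    LZ4_loadDict(s, dict, dictSize);    /* optional; dictSize == 0 just resets */

    int written = 0;
    for (int i = 0; i < nbChunks; i++) {
        /* Per Note 2 above, each chunk must remain valid, unmodified and at
         * the same address while later blocks are produced.  If that cannot
         * be guaranteed, LZ4_saveDict() can copy the last 64 KB of history
         * into a stable buffer between calls. */
        int const r = LZ4_compress_fast_continue(s, chunks[i], dst + written,
                                                 sizes[i], dstCapacity - written, 1);
        if (r == 0) { LZ4_freeStream(s); return -1; }   /* block did not fit in dst */
        written += r;   /* record each block size separately for the decoder */
    }
    LZ4_freeStream(s);
    return written;
}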
- * Dictionary is presumed stable : it must remain accessible and unmodified during next decompression. - * @return : 1 if OK, 0 if error - */ -LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize); - -/*! LZ4_decoderRingBufferSize() : v1.8.2+ - * Note : in a ring buffer scenario (optional), - * blocks are presumed decompressed next to each other - * up to the moment there is not enough remaining space for next block (remainingSize < maxBlockSize), - * at which stage it resumes from beginning of ring buffer. - * When setting such a ring buffer for streaming decompression, - * provides the minimum size of this ring buffer - * to be compatible with any source respecting maxBlockSize condition. - * @return : minimum ring buffer size, - * or 0 if there is an error (invalid maxBlockSize). - */ -LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize); -#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) (65536 + 14 + (maxBlockSize)) /* for static allocation; maxBlockSize presumed valid */ - -/*! LZ4_decompress_*_continue() : - * These decoding functions allow decompression of consecutive blocks in "streaming" mode. - * A block is an unsplittable entity, it must be presented entirely to a decompression function. - * Decompression functions only accepts one block at a time. - * The last 64KB of previously decoded data *must* remain available and unmodified at the memory position where they were decoded. - * If less than 64KB of data has been decoded, all the data must be present. - * - * Special : if decompression side sets a ring buffer, it must respect one of the following conditions : - * - Decompression buffer size is _at least_ LZ4_decoderRingBufferSize(maxBlockSize). - * maxBlockSize is the maximum size of any single block. It can have any value > 16 bytes. - * In which case, encoding and decoding buffers do not need to be synchronized. - * Actually, data can be produced by any source compliant with LZ4 format specification, and respecting maxBlockSize. - * - Synchronized mode : - * Decompression buffer size is _exactly_ the same as compression buffer size, - * and follows exactly same update rule (block boundaries at same positions), - * and decoding function is provided with exact decompressed size of each block (exception for last block of the stream), - * _then_ decoding & encoding ring buffer can have any size, including small ones ( < 64 KB). - * - Decompression buffer is larger than encoding buffer, by a minimum of maxBlockSize more bytes. - * In which case, encoding and decoding buffers do not need to be synchronized, - * and encoding ring buffer can have any size, including small ones ( < 64 KB). - * - * Whenever these conditions are not possible, - * save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression, - * then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing next block. -*/ -LZ4LIB_API int -LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, - const char* src, char* dst, - int srcSize, int dstCapacity); - - -/*! LZ4_decompress_*_usingDict() : - * These decoding functions work the same as - * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue() - * They are stand-alone, and don't need an LZ4_streamDecode_t structure. - * Dictionary is presumed stable : it must remain accessible and unmodified during decompression. 
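For the *_continue() decoding path described above, a minimal sketch of decoding dependent blocks back to back, so the previous 64 KB stays available exactly where it was decoded. The block arrays and sizes are assumed to come from a matching LZ4_compress_fast_continue() producer.

#include "lz4.h"

/* Sketch only: decode a sequence of dependent blocks, in order, into one buffer. */
int decompress_chunks(const char* blocks[], const int blockSizes[], int nbBlocks,
                      char* dst, int dstCapacity)
{
    LZ4_streamDecode_t* const sd = LZ4_createStreamDecode();
    if (sd == NULL) return -1;
    LZ4_setStreamDecode(sd, NULL, 0);   /* start a fresh stream, no dictionary */

    int decoded = 0;
    for (int i = 0; i < nbBlocks; i++) {
        /* Decoding contiguously keeps the prior history in place, as required. */
        int const r = LZ4_decompress_safe_continue(sd, blocks[i], dst + decoded,
                                                   blockSizes[i], dstCapacity - decoded);
        if (r < 0) { LZ4_freeStreamDecode(sd); return r; }
        decoded += r;
    }
    LZ4_freeStreamDecode(sd);
    return decoded;
}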
- * Performance tip : Decompression speed can be substantially increased - * when dst == dictStart + dictSize. - */ -LZ4LIB_API int -LZ4_decompress_safe_usingDict(const char* src, char* dst, - int srcSize, int dstCapacity, - const char* dictStart, int dictSize); - -LZ4LIB_API int -LZ4_decompress_safe_partial_usingDict(const char* src, char* dst, - int compressedSize, - int targetOutputSize, int maxOutputSize, - const char* dictStart, int dictSize); - -#endif /* LZ4_H_2983827168210 */ - - -/*^************************************* - * !!!!!! STATIC LINKING ONLY !!!!!! - ***************************************/ - -/*-**************************************************************************** - * Experimental section - * - * Symbols declared in this section must be considered unstable. Their - * signatures or semantics may change, or they may be removed altogether in the - * future. They are therefore only safe to depend on when the caller is - * statically linked against the library. - * - * To protect against unsafe usage, not only are the declarations guarded, - * the definitions are hidden by default - * when building LZ4 as a shared/dynamic library. - * - * In order to access these declarations, - * define LZ4_STATIC_LINKING_ONLY in your application - * before including LZ4's headers. - * - * In order to make their implementations accessible dynamically, you must - * define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library. - ******************************************************************************/ - -#ifdef LZ4_STATIC_LINKING_ONLY - -#ifndef LZ4_STATIC_3504398509 -#define LZ4_STATIC_3504398509 - -#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS -#define LZ4LIB_STATIC_API LZ4LIB_API -#else -#define LZ4LIB_STATIC_API -#endif - - -/*! LZ4_compress_fast_extState_fastReset() : - * A variant of LZ4_compress_fast_extState(). - * - * Using this variant avoids an expensive initialization step. - * It is only safe to call if the state buffer is known to be correctly initialized already - * (see above comment on LZ4_resetStream_fast() for a definition of "correctly initialized"). - * From a high level, the difference is that - * this function initializes the provided state with a call to something like LZ4_resetStream_fast() - * while LZ4_compress_fast_extState() starts with a call to LZ4_resetStream(). - */ -LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); - -/*! LZ4_attach_dictionary() : - * This is an experimental API that allows - * efficient use of a static dictionary many times. - * - * Rather than re-loading the dictionary buffer into a working context before - * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a - * working LZ4_stream_t, this function introduces a no-copy setup mechanism, - * in which the working stream references the dictionary stream in-place. - * - * Several assumptions are made about the state of the dictionary stream. - * Currently, only streams which have been prepared by LZ4_loadDict() should - * be expected to work. - * - * Alternatively, the provided dictionaryStream may be NULL, - * in which case any existing dictionary stream is unset. - * - * If a dictionary is provided, it replaces any pre-existing stream history. - * The dictionary contents are the only history that can be referenced and - * logically immediately precede the data compressed in the first subsequent - * compression call. 
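For LZ4_compress_fast_extState_fastReset(), declared just above (static-linking-only), the sketch below reuses one externally allocated state: the first compression initializes it fully through LZ4_compress_fast_extState(), later ones take the cheaper fastReset path. The batching scheme and names are illustrative assumptions.

#define LZ4_STATIC_LINKING_ONLY   /* fastReset variant is a static-only API */
#include <stdlib.h>
#include "lz4.h"

/* Sketch only: compress several independent inputs with one reusable state. */
int compress_batch(const char* const* srcs, const int* sizes, int n,
                   char* dst, int dstCapacity)
{
    void* const state = malloc((size_t)LZ4_sizeofState());  /* malloc alignment is sufficient */
    if (state == NULL) return -1;

    int written = 0;
    for (int i = 0; i < n; i++) {
        int const r = (i == 0)
            /* first use: full initialization via the regular extState entry point */
            ? LZ4_compress_fast_extState(state, srcs[i], dst + written,
                                         sizes[i], dstCapacity - written, 1)
            /* later uses: state is known to be correctly initialized */
            : LZ4_compress_fast_extState_fastReset(state, srcs[i], dst + written,
                                                   sizes[i], dstCapacity - written, 1);
        if (r == 0) { free(state); return -1; }
        written += r;
    }
    free(state);
    return written;
}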
- * - * The dictionary will only remain attached to the working stream through the - * first compression call, at the end of which it is cleared. The dictionary - * stream (and source buffer) must remain in-place / accessible / unchanged - * through the completion of the first compression call on the stream. - */ -LZ4LIB_STATIC_API void -LZ4_attach_dictionary(LZ4_stream_t* workingStream, - const LZ4_stream_t* dictionaryStream); - - -/*! In-place compression and decompression - * - * It's possible to have input and output sharing the same buffer, - * for highly constrained memory environments. - * In both cases, it requires input to lay at the end of the buffer, - * and decompression to start at beginning of the buffer. - * Buffer size must feature some margin, hence be larger than final size. - * - * |<------------------------buffer--------------------------------->| - * |<-----------compressed data--------->| - * |<-----------decompressed size------------------>| - * |<----margin---->| - * - * This technique is more useful for decompression, - * since decompressed size is typically larger, - * and margin is short. - * - * In-place decompression will work inside any buffer - * which size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize). - * This presumes that decompressedSize > compressedSize. - * Otherwise, it means compression actually expanded data, - * and it would be more efficient to store such data with a flag indicating it's not compressed. - * This can happen when data is not compressible (already compressed, or encrypted). - * - * For in-place compression, margin is larger, as it must be able to cope with both - * history preservation, requiring input data to remain unmodified up to LZ4_DISTANCE_MAX, - * and data expansion, which can happen when input is not compressible. - * As a consequence, buffer size requirements are much higher, - * and memory savings offered by in-place compression are more limited. - * - * There are ways to limit this cost for compression : - * - Reduce history size, by modifying LZ4_DISTANCE_MAX. - * Note that it is a compile-time constant, so all compressions will apply this limit. - * Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX, - * so it's a reasonable trick when inputs are known to be small. - * - Require the compressor to deliver a "maximum compressed size". - * This is the `dstCapacity` parameter in `LZ4_compress*()`. - * When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can fail, - * in which case, the return code will be 0 (zero). - * The caller must be ready for these cases to happen, - * and typically design a backup scheme to send data uncompressed. - * The combination of both techniques can significantly reduce - * the amount of margin required for in-place compression. - * - * In-place compression can work in any buffer - * which size is >= (maxCompressedSize) - * with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success. - * LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX, - * so it's possible to reduce memory requirements by playing with them. - */ - -#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32) -#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize) ((decompressedSize) + LZ4_DECOMPRESS_INPLACE_MARGIN(decompressedSize)) /**< note: presumes that compressedSize < decompressedSize. 
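A sketch of the in-place decompression recipe described above, using the LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE() macro (static-linking-only): the compressed block is staged at the end of a single buffer and decoded into its beginning. `decSize` and `compSize` are assumed to be known out of band, with decSize > compSize; the helper name is illustrative.

#define LZ4_STATIC_LINKING_ONLY   /* in-place sizing macros are static-only */
#include <stdlib.h>
#include <string.h>
#include "lz4.h"

/* Sketch only: decompress within a single buffer. */
char* decompress_inplace(const char* comp, int compSize, int decSize)
{
    size_t const bufSize = (size_t)LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decSize);
    char* const buf = (char*)malloc(bufSize);
    if (buf == NULL) return NULL;

    /* Input must sit at the end of the buffer; output starts at the beginning. */
    memcpy(buf + bufSize - (size_t)compSize, comp, (size_t)compSize);
    int const r = LZ4_decompress_safe(buf + bufSize - (size_t)compSize, buf,
                                      compSize, decSize);
    if (r != decSize) { free(buf); return NULL; }
    return buf;   /* decoded data occupies buf[0..decSize) */
}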
note2: margin is overestimated a bit, since it could use compressedSize instead */ - -#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */ -# define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */ -#endif - -#define LZ4_COMPRESS_INPLACE_MARGIN (LZ4_DISTANCE_MAX + 32) /* LZ4_DISTANCE_MAX can be safely replaced by srcSize when it's smaller */ -#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize) ((maxCompressedSize) + LZ4_COMPRESS_INPLACE_MARGIN) /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */ - -#endif /* LZ4_STATIC_3504398509 */ -#endif /* LZ4_STATIC_LINKING_ONLY */ - - - -#ifndef LZ4_H_98237428734687 -#define LZ4_H_98237428734687 - -/*-************************************************************ - * Private Definitions - ************************************************************** - * Do not use these definitions directly. - * They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`. - * Accessing members will expose user code to API and/or ABI break in future versions of the library. - **************************************************************/ -#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2) -#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE) -#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */ - -#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# include <stdint.h> - typedef int8_t LZ4_i8; - typedef uint8_t LZ4_byte; - typedef uint16_t LZ4_u16; - typedef uint32_t LZ4_u32; -#else - typedef signed char LZ4_i8; - typedef unsigned char LZ4_byte; - typedef unsigned short LZ4_u16; - typedef unsigned int LZ4_u32; -#endif - -/*! LZ4_stream_t : - * Never ever use below internal definitions directly ! - * These definitions are not API/ABI safe, and may change in future versions. - * If you need static allocation, declare or allocate an LZ4_stream_t object. -**/ - -typedef struct LZ4_stream_t_internal LZ4_stream_t_internal; -struct LZ4_stream_t_internal { - LZ4_u32 hashTable[LZ4_HASH_SIZE_U32]; - const LZ4_byte* dictionary; - const LZ4_stream_t_internal* dictCtx; - LZ4_u32 currentOffset; - LZ4_u32 tableType; - LZ4_u32 dictSize; - /* Implicit padding to ensure structure is aligned */ -}; - -#define LZ4_STREAM_MINSIZE ((1UL << LZ4_MEMORY_USAGE) + 32) /* static size, for inter-version compatibility */ -union LZ4_stream_u { - char minStateSize[LZ4_STREAM_MINSIZE]; - LZ4_stream_t_internal internal_donotuse; -}; /* previously typedef'd to LZ4_stream_t */ - - -/*! LZ4_initStream() : v1.9.0+ - * An LZ4_stream_t structure must be initialized at least once. - * This is automatically done when invoking LZ4_createStream(), - * but it's not when the structure is simply declared on stack (for example). - * - * Use LZ4_initStream() to properly initialize a newly declared LZ4_stream_t. - * It can also initialize any arbitrary buffer of sufficient size, - * and will @return a pointer of proper type upon initialization. - * - * Note : initialization fails if size and alignment conditions are not respected. - * In which case, the function will @return NULL. - * Note2: An LZ4_stream_t structure guarantees correct alignment and size. - * Note3: Before v1.9.0, use LZ4_resetStream() instead -**/ -LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* buffer, size_t size); - - -/*! 
LZ4_streamDecode_t : - * Never ever use below internal definitions directly ! - * These definitions are not API/ABI safe, and may change in future versions. - * If you need static allocation, declare or allocate an LZ4_streamDecode_t object. -**/ -typedef struct { - const LZ4_byte* externalDict; - const LZ4_byte* prefixEnd; - size_t extDictSize; - size_t prefixSize; -} LZ4_streamDecode_t_internal; - -#define LZ4_STREAMDECODE_MINSIZE 32 -union LZ4_streamDecode_u { - char minStateSize[LZ4_STREAMDECODE_MINSIZE]; - LZ4_streamDecode_t_internal internal_donotuse; -} ; /* previously typedef'd to LZ4_streamDecode_t */ - - - -/*-************************************ -* Obsolete Functions -**************************************/ - -/*! Deprecation warnings - * - * Deprecated functions make the compiler generate a warning when invoked. - * This is meant to invite users to update their source code. - * Should deprecation warnings be a problem, it is generally possible to disable them, - * typically with -Wno-deprecated-declarations for gcc - * or _CRT_SECURE_NO_WARNINGS in Visual. - * - * Another method is to define LZ4_DISABLE_DEPRECATE_WARNINGS - * before including the header file. - */ -#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS -# define LZ4_DEPRECATED(message) /* disable deprecation warnings */ -#else -# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ -# define LZ4_DEPRECATED(message) [[deprecated(message)]] -# elif defined(_MSC_VER) -# define LZ4_DEPRECATED(message) __declspec(deprecated(message)) -# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45)) -# define LZ4_DEPRECATED(message) __attribute__((deprecated(message))) -# elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31) -# define LZ4_DEPRECATED(message) __attribute__((deprecated)) -# else -# pragma message("WARNING: LZ4_DEPRECATED needs custom implementation for this compiler") -# define LZ4_DEPRECATED(message) /* disabled */ -# endif -#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */ - -/*! Obsolete compression functions (since v1.7.3) */ -LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize); -LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize); -LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize); -LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); -LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize); -LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize); - -/*! 
Obsolete decompression functions (since v1.8.0) */ -LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize); -LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); - -/* Obsolete streaming functions (since v1.7.0) - * degraded functionality; do not use! - * - * In order to perform streaming compression, these functions depended on data - * that is no longer tracked in the state. They have been preserved as well as - * possible: using them will still produce a correct output. However, they don't - * actually retain any history between compression calls. The compression ratio - * achieved will therefore be no better than compressing each chunk - * independently. - */ -LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API void* LZ4_create (char* inputBuffer); -LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int LZ4_sizeofStreamState(void); -LZ4_DEPRECATED("Use LZ4_resetStream() instead") LZ4LIB_API int LZ4_resetStreamState(void* state, char* inputBuffer); -LZ4_DEPRECATED("Use LZ4_saveDict() instead") LZ4LIB_API char* LZ4_slideInputBuffer (void* state); - -/*! Obsolete streaming decoding functions (since v1.7.0) */ -LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize); -LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize); - -/*! Obsolete LZ4_decompress_fast variants (since v1.9.0) : - * These functions used to be faster than LZ4_decompress_safe(), - * but this is no longer the case. They are now slower. - * This is because LZ4_decompress_fast() doesn't know the input size, - * and therefore must progress more cautiously into the input buffer to not read beyond the end of block. - * On top of that `LZ4_decompress_fast()` is not protected vs malformed or malicious inputs, making it a security liability. - * As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated. - * - * The last remaining LZ4_decompress_fast() specificity is that - * it can decompress a block without knowing its compressed size. - * Such functionality can be achieved in a more secure manner - * by employing LZ4_decompress_safe_partial(). - * - * Parameters: - * originalSize : is the uncompressed size to regenerate. - * `dst` must be already allocated, its size must be >= 'originalSize' bytes. - * @return : number of bytes read from source buffer (== compressed size). - * The function expects to finish at block's end exactly. - * If the source stream is detected malformed, the function stops decoding and returns a negative result. - * note : LZ4_decompress_fast*() requires originalSize. Thanks to this information, it never writes past the output buffer. - * However, since it doesn't know its 'src' size, it may read an unknown amount of input, past input buffer bounds. - * Also, since match offsets are not validated, match reads from 'src' may underflow too. - * These issues never happen if input (compressed) data is correct. - * But they may happen if input data is invalid (error or intentional tampering). - * As a consequence, use these functions in trusted environments with trusted data **only**. - */ -LZ4_DEPRECATED("This function is deprecated and unsafe. 
Consider using LZ4_decompress_safe() instead") -LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize); -LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_continue() instead") -LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize); -LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_usingDict() instead") -LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize); - -/*! LZ4_resetStream() : - * An LZ4_stream_t structure must be initialized at least once. - * This is done with LZ4_initStream(), or LZ4_resetStream(). - * Consider switching to LZ4_initStream(), - * invoking LZ4_resetStream() will trigger deprecation warnings in the future. - */ -LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr); - - -#endif /* LZ4_H_98237428734687 */ - - -#if defined (__cplusplus) -} -#endif diff --git a/librocksdb-sys/lz4/lib/lz4file.c b/librocksdb-sys/lz4/lib/lz4file.c deleted file mode 100644 index eaf9b17..0000000 --- a/librocksdb-sys/lz4/lib/lz4file.c +++ /dev/null @@ -1,311 +0,0 @@ -/* - * LZ4 file library - * Copyright (C) 2022, Xiaomi Inc. - * - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
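Per the LZ4_decompress_fast() note above, its one remaining use case — decoding a block whose exact compressed size is unknown but whose decompressed size is known — can be covered by LZ4_decompress_safe_partial(). A minimal sketch; the helper name and parameter naming are illustrative:

    #include "lz4.h"

    /* `srcBufferSize` is the number of readable bytes in `src` (an upper bound
     * on the block's compressed size); `originalSize` is the known decoded size */
    int decompress_with_known_original_size(const char* src, int srcBufferSize,
                                            char* dst, int originalSize)
    {
        /* targetOutputSize == dstCapacity == originalSize : regenerate exactly
         * originalSize bytes while never reading beyond srcBufferSize */
        int const decoded = LZ4_decompress_safe_partial(src, dst, srcBufferSize,
                                                        originalSize, originalSize);
        return decoded;   /* negative on malformed input */
    }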
- * - * You can contact the author at : - * - LZ4 homepage : http://www.lz4.org - * - LZ4 source repository : https://github.com/lz4/lz4 - */ -#include -#include -#include "lz4.h" -#include "lz4file.h" - -struct LZ4_readFile_s { - LZ4F_dctx* dctxPtr; - FILE* fp; - LZ4_byte* srcBuf; - size_t srcBufNext; - size_t srcBufSize; - size_t srcBufMaxSize; -}; - -struct LZ4_writeFile_s { - LZ4F_cctx* cctxPtr; - FILE* fp; - LZ4_byte* dstBuf; - size_t maxWriteSize; - size_t dstBufMaxSize; - LZ4F_errorCode_t errCode; -}; - -LZ4F_errorCode_t LZ4F_readOpen(LZ4_readFile_t** lz4fRead, FILE* fp) -{ - char buf[LZ4F_HEADER_SIZE_MAX]; - size_t consumedSize; - LZ4F_errorCode_t ret; - LZ4F_frameInfo_t info; - - if (fp == NULL || lz4fRead == NULL) { - return -LZ4F_ERROR_GENERIC; - } - - *lz4fRead = (LZ4_readFile_t*)calloc(1, sizeof(LZ4_readFile_t)); - if (*lz4fRead == NULL) { - return -LZ4F_ERROR_allocation_failed; - } - - ret = LZ4F_createDecompressionContext(&(*lz4fRead)->dctxPtr, LZ4F_getVersion()); - if (LZ4F_isError(ret)) { - free(*lz4fRead); - return ret; - } - - (*lz4fRead)->fp = fp; - consumedSize = fread(buf, 1, sizeof(buf), (*lz4fRead)->fp); - if (consumedSize != sizeof(buf)) { - free(*lz4fRead); - return -LZ4F_ERROR_GENERIC; - } - - ret = LZ4F_getFrameInfo((*lz4fRead)->dctxPtr, &info, buf, &consumedSize); - if (LZ4F_isError(ret)) { - LZ4F_freeDecompressionContext((*lz4fRead)->dctxPtr); - free(*lz4fRead); - return ret; - } - - switch (info.blockSizeID) { - case LZ4F_default : - case LZ4F_max64KB : - (*lz4fRead)->srcBufMaxSize = 64 * 1024; - break; - case LZ4F_max256KB: - (*lz4fRead)->srcBufMaxSize = 256 * 1024; - break; - case LZ4F_max1MB: - (*lz4fRead)->srcBufMaxSize = 1 * 1024 * 1024; - break; - case LZ4F_max4MB: - (*lz4fRead)->srcBufMaxSize = 4 * 1024 * 1024; - break; - default: - LZ4F_freeDecompressionContext((*lz4fRead)->dctxPtr); - free(*lz4fRead); - return -LZ4F_ERROR_maxBlockSize_invalid; - } - - (*lz4fRead)->srcBuf = (LZ4_byte*)malloc((*lz4fRead)->srcBufMaxSize); - if ((*lz4fRead)->srcBuf == NULL) { - LZ4F_freeDecompressionContext((*lz4fRead)->dctxPtr); - free(lz4fRead); - return -LZ4F_ERROR_allocation_failed; - } - - (*lz4fRead)->srcBufSize = sizeof(buf) - consumedSize; - memcpy((*lz4fRead)->srcBuf, buf + consumedSize, (*lz4fRead)->srcBufSize); - - return ret; -} - -size_t LZ4F_read(LZ4_readFile_t* lz4fRead, void* buf, size_t size) -{ - LZ4_byte* p = (LZ4_byte*)buf; - size_t next = 0; - - if (lz4fRead == NULL || buf == NULL) - return -LZ4F_ERROR_GENERIC; - - while (next < size) { - size_t srcsize = lz4fRead->srcBufSize - lz4fRead->srcBufNext; - size_t dstsize = size - next; - size_t ret; - - if (srcsize == 0) { - ret = fread(lz4fRead->srcBuf, 1, lz4fRead->srcBufMaxSize, lz4fRead->fp); - if (ret > 0) { - lz4fRead->srcBufSize = ret; - srcsize = lz4fRead->srcBufSize; - lz4fRead->srcBufNext = 0; - } - else if (ret == 0) { - break; - } - else { - return -LZ4F_ERROR_GENERIC; - } - } - - ret = LZ4F_decompress(lz4fRead->dctxPtr, - p, &dstsize, - lz4fRead->srcBuf + lz4fRead->srcBufNext, - &srcsize, - NULL); - if (LZ4F_isError(ret)) { - return ret; - } - - lz4fRead->srcBufNext += srcsize; - next += dstsize; - p += dstsize; - } - - return next; -} - -LZ4F_errorCode_t LZ4F_readClose(LZ4_readFile_t* lz4fRead) -{ - if (lz4fRead == NULL) - return -LZ4F_ERROR_GENERIC; - LZ4F_freeDecompressionContext(lz4fRead->dctxPtr); - free(lz4fRead->srcBuf); - free(lz4fRead); - return LZ4F_OK_NoError; -} - -LZ4F_errorCode_t LZ4F_writeOpen(LZ4_writeFile_t** lz4fWrite, FILE* fp, const LZ4F_preferences_t* prefsPtr) -{ - 
LZ4_byte buf[LZ4F_HEADER_SIZE_MAX]; - size_t ret; - - if (fp == NULL || lz4fWrite == NULL) - return -LZ4F_ERROR_GENERIC; - - *lz4fWrite = (LZ4_writeFile_t*)malloc(sizeof(LZ4_writeFile_t)); - if (*lz4fWrite == NULL) { - return -LZ4F_ERROR_allocation_failed; - } - if (prefsPtr != NULL) { - switch (prefsPtr->frameInfo.blockSizeID) { - case LZ4F_default : - case LZ4F_max64KB : - (*lz4fWrite)->maxWriteSize = 64 * 1024; - break; - case LZ4F_max256KB: - (*lz4fWrite)->maxWriteSize = 256 * 1024; - break; - case LZ4F_max1MB: - (*lz4fWrite)->maxWriteSize = 1 * 1024 * 1024; - break; - case LZ4F_max4MB: - (*lz4fWrite)->maxWriteSize = 4 * 1024 * 1024; - break; - default: - free(lz4fWrite); - return -LZ4F_ERROR_maxBlockSize_invalid; - } - } else { - (*lz4fWrite)->maxWriteSize = 64 * 1024; - } - - (*lz4fWrite)->dstBufMaxSize = LZ4F_compressBound((*lz4fWrite)->maxWriteSize, prefsPtr); - (*lz4fWrite)->dstBuf = (LZ4_byte*)malloc((*lz4fWrite)->dstBufMaxSize); - if ((*lz4fWrite)->dstBuf == NULL) { - free(*lz4fWrite); - return -LZ4F_ERROR_allocation_failed; - } - - ret = LZ4F_createCompressionContext(&(*lz4fWrite)->cctxPtr, LZ4F_getVersion()); - if (LZ4F_isError(ret)) { - free((*lz4fWrite)->dstBuf); - free(*lz4fWrite); - return ret; - } - - ret = LZ4F_compressBegin((*lz4fWrite)->cctxPtr, buf, LZ4F_HEADER_SIZE_MAX, prefsPtr); - if (LZ4F_isError(ret)) { - LZ4F_freeCompressionContext((*lz4fWrite)->cctxPtr); - free((*lz4fWrite)->dstBuf); - free(*lz4fWrite); - return ret; - } - - if (ret != fwrite(buf, 1, ret, fp)) { - LZ4F_freeCompressionContext((*lz4fWrite)->cctxPtr); - free((*lz4fWrite)->dstBuf); - free(*lz4fWrite); - return -LZ4F_ERROR_GENERIC; - } - - (*lz4fWrite)->fp = fp; - (*lz4fWrite)->errCode = LZ4F_OK_NoError; - return LZ4F_OK_NoError; -} - -size_t LZ4F_write(LZ4_writeFile_t* lz4fWrite, void* buf, size_t size) -{ - LZ4_byte* p = (LZ4_byte*)buf; - size_t remain = size; - size_t chunk; - size_t ret; - - if (lz4fWrite == NULL || buf == NULL) - return -LZ4F_ERROR_GENERIC; - while (remain) { - if (remain > lz4fWrite->maxWriteSize) - chunk = lz4fWrite->maxWriteSize; - else - chunk = remain; - - ret = LZ4F_compressUpdate(lz4fWrite->cctxPtr, - lz4fWrite->dstBuf, lz4fWrite->dstBufMaxSize, - p, chunk, - NULL); - if (LZ4F_isError(ret)) { - lz4fWrite->errCode = ret; - return ret; - } - - if(ret != fwrite(lz4fWrite->dstBuf, 1, ret, lz4fWrite->fp)) { - lz4fWrite->errCode = -LZ4F_ERROR_GENERIC; - return -LZ4F_ERROR_GENERIC; - } - - p += chunk; - remain -= chunk; - } - - return size; -} - -LZ4F_errorCode_t LZ4F_writeClose(LZ4_writeFile_t* lz4fWrite) -{ - LZ4F_errorCode_t ret = LZ4F_OK_NoError; - - if (lz4fWrite == NULL) - return -LZ4F_ERROR_GENERIC; - - if (lz4fWrite->errCode == LZ4F_OK_NoError) { - ret = LZ4F_compressEnd(lz4fWrite->cctxPtr, - lz4fWrite->dstBuf, lz4fWrite->dstBufMaxSize, - NULL); - if (LZ4F_isError(ret)) { - goto out; - } - - if (ret != fwrite(lz4fWrite->dstBuf, 1, ret, lz4fWrite->fp)) { - ret = -LZ4F_ERROR_GENERIC; - } - } - -out: - LZ4F_freeCompressionContext(lz4fWrite->cctxPtr); - free(lz4fWrite->dstBuf); - free(lz4fWrite); - return ret; -} diff --git a/librocksdb-sys/lz4/lib/lz4file.h b/librocksdb-sys/lz4/lib/lz4file.h deleted file mode 100644 index 5527130..0000000 --- a/librocksdb-sys/lz4/lib/lz4file.h +++ /dev/null @@ -1,93 +0,0 @@ -/* - LZ4 file library - Header File - Copyright (C) 2022, Xiaomi Inc. 
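A minimal write-side sketch for the LZ4F_writeOpen()/LZ4F_write()/LZ4F_writeClose() trio implemented above (the helper name is illustrative; passing NULL preferences selects the 64 KB default handled in LZ4F_writeOpen()):

    #include <stdio.h>
    #include "lz4file.h"

    /* compress `size` bytes from `data` into `fp` as a single LZ4 frame */
    static int write_lz4_file(FILE* fp, void* data, size_t size)
    {
        LZ4_writeFile_t* writer = NULL;

        if (LZ4F_isError(LZ4F_writeOpen(&writer, fp, NULL)))
            return -1;
        if (LZ4F_isError(LZ4F_write(writer, data, size))) {
            LZ4F_writeClose(writer);   /* still releases the context */
            return -1;
        }
        /* LZ4F_writeClose() flushes the final block and writes the frame footer */
        return LZ4F_isError(LZ4F_writeClose(writer)) ? -1 : 0;
    }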
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ -#if defined (__cplusplus) -extern "C" { -#endif - -#ifndef LZ4FILE_H -#define LZ4FILE_H - -#include -#include "lz4frame_static.h" - -typedef struct LZ4_readFile_s LZ4_readFile_t; -typedef struct LZ4_writeFile_s LZ4_writeFile_t; - -/*! LZ4F_readOpen() : - * Set read lz4file handle. - * `lz4f` will set a lz4file handle. - * `fp` must be the return value of the lz4 file opened by fopen. - */ -LZ4FLIB_STATIC_API LZ4F_errorCode_t LZ4F_readOpen(LZ4_readFile_t** lz4fRead, FILE* fp); - -/*! LZ4F_read() : - * Read lz4file content to buffer. - * `lz4f` must use LZ4_readOpen to set first. - * `buf` read data buffer. - * `size` read data buffer size. - */ -LZ4FLIB_STATIC_API size_t LZ4F_read(LZ4_readFile_t* lz4fRead, void* buf, size_t size); - -/*! LZ4F_readClose() : - * Close lz4file handle. - * `lz4f` must use LZ4_readOpen to set first. - */ -LZ4FLIB_STATIC_API LZ4F_errorCode_t LZ4F_readClose(LZ4_readFile_t* lz4fRead); - -/*! LZ4F_writeOpen() : - * Set write lz4file handle. - * `lz4f` will set a lz4file handle. - * `fp` must be the return value of the lz4 file opened by fopen. - */ -LZ4FLIB_STATIC_API LZ4F_errorCode_t LZ4F_writeOpen(LZ4_writeFile_t** lz4fWrite, FILE* fp, const LZ4F_preferences_t* prefsPtr); - -/*! LZ4F_write() : - * Write buffer to lz4file. - * `lz4f` must use LZ4F_writeOpen to set first. - * `buf` write data buffer. - * `size` write data buffer size. - */ -LZ4FLIB_STATIC_API size_t LZ4F_write(LZ4_writeFile_t* lz4fWrite, void* buf, size_t size); - -/*! LZ4F_writeClose() : - * Close lz4file handle. - * `lz4f` must use LZ4F_writeOpen to set first. - */ -LZ4FLIB_STATIC_API LZ4F_errorCode_t LZ4F_writeClose(LZ4_writeFile_t* lz4fWrite); - -#endif /* LZ4FILE_H */ - -#if defined (__cplusplus) -} -#endif diff --git a/librocksdb-sys/lz4/lib/lz4frame.c b/librocksdb-sys/lz4/lib/lz4frame.c deleted file mode 100644 index 174f9ae..0000000 --- a/librocksdb-sys/lz4/lib/lz4frame.c +++ /dev/null @@ -1,2078 +0,0 @@ -/* - * LZ4 auto-framing library - * Copyright (C) 2011-2016, Yann Collet. 
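A read-side counterpart, following the LZ4F_readOpen()/LZ4F_read()/LZ4F_readClose() documentation in lz4file.h above (writing to stdout and the 4 KB scratch buffer are choices of this illustration):

    #include <stdio.h>
    #include "lz4file.h"

    /* decompress an already-opened .lz4 file to stdout */
    static int dump_lz4_file(FILE* fp)
    {
        LZ4_readFile_t* reader = NULL;
        char buf[4096];

        if (LZ4F_isError(LZ4F_readOpen(&reader, fp)))
            return -1;
        for (;;) {
            size_t const r = LZ4F_read(reader, buf, sizeof(buf));
            if (LZ4F_isError(r)) { LZ4F_readClose(reader); return -1; }
            if (r == 0) break;                  /* end of frame */
            fwrite(buf, 1, r, stdout);
        }
        LZ4F_readClose(reader);
        return 0;
    }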
- * - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * You can contact the author at : - * - LZ4 homepage : http://www.lz4.org - * - LZ4 source repository : https://github.com/lz4/lz4 - */ - -/* LZ4F is a stand-alone API to create LZ4-compressed Frames - * in full conformance with specification v1.6.1 . - * This library rely upon memory management capabilities (malloc, free) - * provided either by , - * or redirected towards another library of user's choice - * (see Memory Routines below). - */ - - -/*-************************************ -* Compiler Options -**************************************/ -#ifdef _MSC_VER /* Visual Studio */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -#endif - - -/*-************************************ -* Tuning parameters -**************************************/ -/* - * LZ4F_HEAPMODE : - * Select how default compression functions will allocate memory for their hash table, - * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()). - */ -#ifndef LZ4F_HEAPMODE -# define LZ4F_HEAPMODE 0 -#endif - - -/*-************************************ -* Library declarations -**************************************/ -#define LZ4F_STATIC_LINKING_ONLY -#include "lz4frame.h" -#define LZ4_STATIC_LINKING_ONLY -#include "lz4.h" -#define LZ4_HC_STATIC_LINKING_ONLY -#include "lz4hc.h" -#define XXH_STATIC_LINKING_ONLY -#include "xxhash.h" - - -/*-************************************ -* Memory routines -**************************************/ -/* - * User may redirect invocations of - * malloc(), calloc() and free() - * towards another library or solution of their choice - * by modifying below section. 
-**/ - -#include /* memset, memcpy, memmove */ -#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */ -# define MEM_INIT(p,v,s) memset((p),(v),(s)) -#endif - -#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */ -# include /* malloc, calloc, free */ -# define ALLOC(s) malloc(s) -# define ALLOC_AND_ZERO(s) calloc(1,(s)) -# define FREEMEM(p) free(p) -#endif - -static void* LZ4F_calloc(size_t s, LZ4F_CustomMem cmem) -{ - /* custom calloc defined : use it */ - if (cmem.customCalloc != NULL) { - return cmem.customCalloc(cmem.opaqueState, s); - } - /* nothing defined : use default 's calloc() */ - if (cmem.customAlloc == NULL) { - return ALLOC_AND_ZERO(s); - } - /* only custom alloc defined : use it, and combine it with memset() */ - { void* const p = cmem.customAlloc(cmem.opaqueState, s); - if (p != NULL) MEM_INIT(p, 0, s); - return p; -} } - -static void* LZ4F_malloc(size_t s, LZ4F_CustomMem cmem) -{ - /* custom malloc defined : use it */ - if (cmem.customAlloc != NULL) { - return cmem.customAlloc(cmem.opaqueState, s); - } - /* nothing defined : use default 's malloc() */ - return ALLOC(s); -} - -static void LZ4F_free(void* p, LZ4F_CustomMem cmem) -{ - /* custom malloc defined : use it */ - if (cmem.customFree != NULL) { - cmem.customFree(cmem.opaqueState, p); - return; - } - /* nothing defined : use default 's free() */ - FREEMEM(p); -} - - -/*-************************************ -* Debug -**************************************/ -#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1) -# include -#else -# ifndef assert -# define assert(condition) ((void)0) -# endif -#endif - -#define LZ4F_STATIC_ASSERT(c) { enum { LZ4F_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ - -#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) && !defined(DEBUGLOG) -# include -static int g_debuglog_enable = 1; -# define DEBUGLOG(l, ...) { \ - if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \ - fprintf(stderr, __FILE__ ": "); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, " \n"); \ - } } -#else -# define DEBUGLOG(l, ...) 
{} /* disabled */ -#endif - - -/*-************************************ -* Basic Types -**************************************/ -#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include - typedef uint8_t BYTE; - typedef uint16_t U16; - typedef uint32_t U32; - typedef int32_t S32; - typedef uint64_t U64; -#else - typedef unsigned char BYTE; - typedef unsigned short U16; - typedef unsigned int U32; - typedef signed int S32; - typedef unsigned long long U64; -#endif - - -/* unoptimized version; solves endianness & alignment issues */ -static U32 LZ4F_readLE32 (const void* src) -{ - const BYTE* const srcPtr = (const BYTE*)src; - U32 value32 = srcPtr[0]; - value32 += ((U32)srcPtr[1])<< 8; - value32 += ((U32)srcPtr[2])<<16; - value32 += ((U32)srcPtr[3])<<24; - return value32; -} - -static void LZ4F_writeLE32 (void* dst, U32 value32) -{ - BYTE* const dstPtr = (BYTE*)dst; - dstPtr[0] = (BYTE)value32; - dstPtr[1] = (BYTE)(value32 >> 8); - dstPtr[2] = (BYTE)(value32 >> 16); - dstPtr[3] = (BYTE)(value32 >> 24); -} - -static U64 LZ4F_readLE64 (const void* src) -{ - const BYTE* const srcPtr = (const BYTE*)src; - U64 value64 = srcPtr[0]; - value64 += ((U64)srcPtr[1]<<8); - value64 += ((U64)srcPtr[2]<<16); - value64 += ((U64)srcPtr[3]<<24); - value64 += ((U64)srcPtr[4]<<32); - value64 += ((U64)srcPtr[5]<<40); - value64 += ((U64)srcPtr[6]<<48); - value64 += ((U64)srcPtr[7]<<56); - return value64; -} - -static void LZ4F_writeLE64 (void* dst, U64 value64) -{ - BYTE* const dstPtr = (BYTE*)dst; - dstPtr[0] = (BYTE)value64; - dstPtr[1] = (BYTE)(value64 >> 8); - dstPtr[2] = (BYTE)(value64 >> 16); - dstPtr[3] = (BYTE)(value64 >> 24); - dstPtr[4] = (BYTE)(value64 >> 32); - dstPtr[5] = (BYTE)(value64 >> 40); - dstPtr[6] = (BYTE)(value64 >> 48); - dstPtr[7] = (BYTE)(value64 >> 56); -} - - -/*-************************************ -* Constants -**************************************/ -#ifndef LZ4_SRC_INCLUDED /* avoid double definition */ -# define KB *(1<<10) -# define MB *(1<<20) -# define GB *(1<<30) -#endif - -#define _1BIT 0x01 -#define _2BITS 0x03 -#define _3BITS 0x07 -#define _4BITS 0x0F -#define _8BITS 0xFF - -#define LZ4F_BLOCKUNCOMPRESSED_FLAG 0x80000000U -#define LZ4F_BLOCKSIZEID_DEFAULT LZ4F_max64KB - -static const size_t minFHSize = LZ4F_HEADER_SIZE_MIN; /* 7 */ -static const size_t maxFHSize = LZ4F_HEADER_SIZE_MAX; /* 19 */ -static const size_t BHSize = LZ4F_BLOCK_HEADER_SIZE; /* block header : size, and compress flag */ -static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE; /* block footer : checksum (optional) */ - - -/*-************************************ -* Structures and local types -**************************************/ - -typedef enum { LZ4B_COMPRESSED, LZ4B_UNCOMPRESSED} LZ4F_blockCompression_t; - -typedef struct LZ4F_cctx_s -{ - LZ4F_CustomMem cmem; - LZ4F_preferences_t prefs; - U32 version; - U32 cStage; - const LZ4F_CDict* cdict; - size_t maxBlockSize; - size_t maxBufferSize; - BYTE* tmpBuff; /* internal buffer, for streaming */ - BYTE* tmpIn; /* starting position of data compress within internal buffer (>= tmpBuff) */ - size_t tmpInSize; /* amount of data to compress after tmpIn */ - U64 totalInSize; - XXH32_state_t xxh; - void* lz4CtxPtr; - U16 lz4CtxAlloc; /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */ - U16 lz4CtxState; /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */ - LZ4F_blockCompression_t blockCompression; -} LZ4F_cctx_t; - - -/*-************************************ -* Error management 
-**************************************/ -#define LZ4F_GENERATE_STRING(STRING) #STRING, -static const char* LZ4F_errorStrings[] = { LZ4F_LIST_ERRORS(LZ4F_GENERATE_STRING) }; - - -unsigned LZ4F_isError(LZ4F_errorCode_t code) -{ - return (code > (LZ4F_errorCode_t)(-LZ4F_ERROR_maxCode)); -} - -const char* LZ4F_getErrorName(LZ4F_errorCode_t code) -{ - static const char* codeError = "Unspecified error code"; - if (LZ4F_isError(code)) return LZ4F_errorStrings[-(int)(code)]; - return codeError; -} - -LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult) -{ - if (!LZ4F_isError(functionResult)) return LZ4F_OK_NoError; - return (LZ4F_errorCodes)(-(ptrdiff_t)functionResult); -} - -static LZ4F_errorCode_t LZ4F_returnErrorCode(LZ4F_errorCodes code) -{ - /* A compilation error here means sizeof(ptrdiff_t) is not large enough */ - LZ4F_STATIC_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t)); - return (LZ4F_errorCode_t)-(ptrdiff_t)code; -} - -#define RETURN_ERROR(e) return LZ4F_returnErrorCode(LZ4F_ERROR_ ## e) - -#define RETURN_ERROR_IF(c,e) if (c) RETURN_ERROR(e) - -#define FORWARD_IF_ERROR(r) if (LZ4F_isError(r)) return (r) - -unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; } - -int LZ4F_compressionLevel_max(void) { return LZ4HC_CLEVEL_MAX; } - -size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID) -{ - static const size_t blockSizes[4] = { 64 KB, 256 KB, 1 MB, 4 MB }; - - if (blockSizeID == 0) blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT; - if (blockSizeID < LZ4F_max64KB || blockSizeID > LZ4F_max4MB) - RETURN_ERROR(maxBlockSize_invalid); - { int const blockSizeIdx = (int)blockSizeID - (int)LZ4F_max64KB; - return blockSizes[blockSizeIdx]; -} } - -/*-************************************ -* Private functions -**************************************/ -#define MIN(a,b) ( (a) < (b) ? (a) : (b) ) - -static BYTE LZ4F_headerChecksum (const void* header, size_t length) -{ - U32 const xxh = XXH32(header, length, 0); - return (BYTE)(xxh >> 8); -} - - -/*-************************************ -* Simple-pass compression functions -**************************************/ -static LZ4F_blockSizeID_t LZ4F_optimalBSID(const LZ4F_blockSizeID_t requestedBSID, - const size_t srcSize) -{ - LZ4F_blockSizeID_t proposedBSID = LZ4F_max64KB; - size_t maxBlockSize = 64 KB; - while (requestedBSID > proposedBSID) { - if (srcSize <= maxBlockSize) - return proposedBSID; - proposedBSID = (LZ4F_blockSizeID_t)((int)proposedBSID + 1); - maxBlockSize <<= 2; - } - return requestedBSID; -} - -/*! LZ4F_compressBound_internal() : - * Provides dstCapacity given a srcSize to guarantee operation success in worst case situations. - * prefsPtr is optional : if NULL is provided, preferences will be set to cover worst case scenario. - * @return is always the same for a srcSize and prefsPtr, so it can be relied upon to size reusable buffers. - * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations. - */ -static size_t LZ4F_compressBound_internal(size_t srcSize, - const LZ4F_preferences_t* preferencesPtr, - size_t alreadyBuffered) -{ - LZ4F_preferences_t prefsNull = LZ4F_INIT_PREFERENCES; - prefsNull.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled; /* worst case */ - prefsNull.frameInfo.blockChecksumFlag = LZ4F_blockChecksumEnabled; /* worst case */ - { const LZ4F_preferences_t* const prefsPtr = (preferencesPtr==NULL) ? 
&prefsNull : preferencesPtr; - U32 const flush = prefsPtr->autoFlush | (srcSize==0); - LZ4F_blockSizeID_t const blockID = prefsPtr->frameInfo.blockSizeID; - size_t const blockSize = LZ4F_getBlockSize(blockID); - size_t const maxBuffered = blockSize - 1; - size_t const bufferedSize = MIN(alreadyBuffered, maxBuffered); - size_t const maxSrcSize = srcSize + bufferedSize; - unsigned const nbFullBlocks = (unsigned)(maxSrcSize / blockSize); - size_t const partialBlockSize = maxSrcSize & (blockSize-1); - size_t const lastBlockSize = flush ? partialBlockSize : 0; - unsigned const nbBlocks = nbFullBlocks + (lastBlockSize>0); - - size_t const blockCRCSize = BFSize * prefsPtr->frameInfo.blockChecksumFlag; - size_t const frameEnd = BHSize + (prefsPtr->frameInfo.contentChecksumFlag*BFSize); - - return ((BHSize + blockCRCSize) * nbBlocks) + - (blockSize * nbFullBlocks) + lastBlockSize + frameEnd; - } -} - -size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr) -{ - LZ4F_preferences_t prefs; - size_t const headerSize = maxFHSize; /* max header size, including optional fields */ - - if (preferencesPtr!=NULL) prefs = *preferencesPtr; - else MEM_INIT(&prefs, 0, sizeof(prefs)); - prefs.autoFlush = 1; - - return headerSize + LZ4F_compressBound_internal(srcSize, &prefs, 0);; -} - - -/*! LZ4F_compressFrame_usingCDict() : - * Compress srcBuffer using a dictionary, in a single step. - * cdict can be NULL, in which case, no dictionary is used. - * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). - * The LZ4F_preferences_t structure is optional : you may provide NULL as argument, - * however, it's the only way to provide a dictID, so it's not recommended. - * @return : number of bytes written into dstBuffer, - * or an error code if it fails (can be tested using LZ4F_isError()) - */ -size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const void* srcBuffer, size_t srcSize, - const LZ4F_CDict* cdict, - const LZ4F_preferences_t* preferencesPtr) -{ - LZ4F_preferences_t prefs; - LZ4F_compressOptions_t options; - BYTE* const dstStart = (BYTE*) dstBuffer; - BYTE* dstPtr = dstStart; - BYTE* const dstEnd = dstStart + dstCapacity; - - if (preferencesPtr!=NULL) - prefs = *preferencesPtr; - else - MEM_INIT(&prefs, 0, sizeof(prefs)); - if (prefs.frameInfo.contentSize != 0) - prefs.frameInfo.contentSize = (U64)srcSize; /* auto-correct content size if selected (!=0) */ - - prefs.frameInfo.blockSizeID = LZ4F_optimalBSID(prefs.frameInfo.blockSizeID, srcSize); - prefs.autoFlush = 1; - if (srcSize <= LZ4F_getBlockSize(prefs.frameInfo.blockSizeID)) - prefs.frameInfo.blockMode = LZ4F_blockIndependent; /* only one block => no need for inter-block link */ - - MEM_INIT(&options, 0, sizeof(options)); - options.stableSrc = 1; - - RETURN_ERROR_IF(dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs), dstMaxSize_tooSmall); - - { size_t const headerSize = LZ4F_compressBegin_usingCDict(cctx, dstBuffer, dstCapacity, cdict, &prefs); /* write header */ - FORWARD_IF_ERROR(headerSize); - dstPtr += headerSize; /* header size */ } - - assert(dstEnd >= dstPtr); - { size_t const cSize = LZ4F_compressUpdate(cctx, dstPtr, (size_t)(dstEnd-dstPtr), srcBuffer, srcSize, &options); - FORWARD_IF_ERROR(cSize); - dstPtr += cSize; } - - assert(dstEnd >= dstPtr); - { size_t const tailSize = LZ4F_compressEnd(cctx, dstPtr, (size_t)(dstEnd-dstPtr), &options); /* flush last block, and generate suffix */ - FORWARD_IF_ERROR(tailSize); - dstPtr += tailSize; } - - 
assert(dstEnd >= dstStart); - return (size_t)(dstPtr - dstStart); -} - - -/*! LZ4F_compressFrame() : - * Compress an entire srcBuffer into a valid LZ4 frame, in a single step. - * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). - * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default. - * @return : number of bytes written into dstBuffer. - * or an error code if it fails (can be tested using LZ4F_isError()) - */ -size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, - const void* srcBuffer, size_t srcSize, - const LZ4F_preferences_t* preferencesPtr) -{ - size_t result; -#if (LZ4F_HEAPMODE) - LZ4F_cctx_t* cctxPtr; - result = LZ4F_createCompressionContext(&cctxPtr, LZ4F_VERSION); - FORWARD_IF_ERROR(result); -#else - LZ4F_cctx_t cctx; - LZ4_stream_t lz4ctx; - LZ4F_cctx_t* const cctxPtr = &cctx; - - MEM_INIT(&cctx, 0, sizeof(cctx)); - cctx.version = LZ4F_VERSION; - cctx.maxBufferSize = 5 MB; /* mess with real buffer size to prevent dynamic allocation; works only because autoflush==1 & stableSrc==1 */ - if ( preferencesPtr == NULL - || preferencesPtr->compressionLevel < LZ4HC_CLEVEL_MIN ) { - LZ4_initStream(&lz4ctx, sizeof(lz4ctx)); - cctxPtr->lz4CtxPtr = &lz4ctx; - cctxPtr->lz4CtxAlloc = 1; - cctxPtr->lz4CtxState = 1; - } -#endif - DEBUGLOG(4, "LZ4F_compressFrame"); - - result = LZ4F_compressFrame_usingCDict(cctxPtr, dstBuffer, dstCapacity, - srcBuffer, srcSize, - NULL, preferencesPtr); - -#if (LZ4F_HEAPMODE) - LZ4F_freeCompressionContext(cctxPtr); -#else - if ( preferencesPtr != NULL - && preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN ) { - LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem); - } -#endif - return result; -} - - -/*-*************************************************** -* Dictionary compression -*****************************************************/ - -struct LZ4F_CDict_s { - LZ4F_CustomMem cmem; - void* dictContent; - LZ4_stream_t* fastCtx; - LZ4_streamHC_t* HCCtx; -}; /* typedef'd to LZ4F_CDict within lz4frame_static.h */ - -LZ4F_CDict* -LZ4F_createCDict_advanced(LZ4F_CustomMem cmem, const void* dictBuffer, size_t dictSize) -{ - const char* dictStart = (const char*)dictBuffer; - LZ4F_CDict* const cdict = (LZ4F_CDict*)LZ4F_malloc(sizeof(*cdict), cmem); - DEBUGLOG(4, "LZ4F_createCDict_advanced"); - if (!cdict) return NULL; - cdict->cmem = cmem; - if (dictSize > 64 KB) { - dictStart += dictSize - 64 KB; - dictSize = 64 KB; - } - cdict->dictContent = LZ4F_malloc(dictSize, cmem); - cdict->fastCtx = (LZ4_stream_t*)LZ4F_malloc(sizeof(LZ4_stream_t), cmem); - if (cdict->fastCtx) - LZ4_initStream(cdict->fastCtx, sizeof(LZ4_stream_t)); - cdict->HCCtx = (LZ4_streamHC_t*)LZ4F_malloc(sizeof(LZ4_streamHC_t), cmem); - if (cdict->HCCtx) - LZ4_initStream(cdict->HCCtx, sizeof(LZ4_streamHC_t)); - if (!cdict->dictContent || !cdict->fastCtx || !cdict->HCCtx) { - LZ4F_freeCDict(cdict); - return NULL; - } - memcpy(cdict->dictContent, dictStart, dictSize); - LZ4_loadDict (cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize); - LZ4_setCompressionLevel(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT); - LZ4_loadDictHC(cdict->HCCtx, (const char*)cdict->dictContent, (int)dictSize); - return cdict; -} - -/*! LZ4F_createCDict() : - * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once. - * LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay. 
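A single-call usage sketch for LZ4F_compressFrame(), with the destination sized by LZ4F_compressFrameBound() as its contract above requires; the malloc-based ownership convention and helper name are assumptions of this illustration:

    #include <stdlib.h>
    #include "lz4frame.h"

    /* compress `srcSize` bytes into a freshly allocated buffer; returns the frame
     * size and stores the buffer in *dstOut (caller frees), or 0 on failure */
    static size_t compress_whole_buffer(const void* src, size_t srcSize, void** dstOut)
    {
        size_t const dstCapacity = LZ4F_compressFrameBound(srcSize, NULL);  /* NULL => worst-case preferences */
        void* const dst = malloc(dstCapacity);
        size_t cSize;
        if (dst == NULL) return 0;

        cSize = LZ4F_compressFrame(dst, dstCapacity, src, srcSize, NULL);
        if (LZ4F_isError(cSize)) { free(dst); return 0; }
        *dstOut = dst;
        return cSize;
    }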
- * LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. - * @dictBuffer can be released after LZ4F_CDict creation, since its content is copied within CDict - * @return : digested dictionary for compression, or NULL if failed */ -LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize) -{ - DEBUGLOG(4, "LZ4F_createCDict"); - return LZ4F_createCDict_advanced(LZ4F_defaultCMem, dictBuffer, dictSize); -} - -void LZ4F_freeCDict(LZ4F_CDict* cdict) -{ - if (cdict==NULL) return; /* support free on NULL */ - LZ4F_free(cdict->dictContent, cdict->cmem); - LZ4F_free(cdict->fastCtx, cdict->cmem); - LZ4F_free(cdict->HCCtx, cdict->cmem); - LZ4F_free(cdict, cdict->cmem); -} - - -/*-********************************* -* Advanced compression functions -***********************************/ - -LZ4F_cctx* -LZ4F_createCompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version) -{ - LZ4F_cctx* const cctxPtr = - (LZ4F_cctx*)LZ4F_calloc(sizeof(LZ4F_cctx), customMem); - if (cctxPtr==NULL) return NULL; - - cctxPtr->cmem = customMem; - cctxPtr->version = version; - cctxPtr->cStage = 0; /* Uninitialized. Next stage : init cctx */ - - return cctxPtr; -} - -/*! LZ4F_createCompressionContext() : - * The first thing to do is to create a compressionContext object, which will be used in all compression operations. - * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version and an LZ4F_preferences_t structure. - * The version provided MUST be LZ4F_VERSION. It is intended to track potential incompatible differences between different binaries. - * The function will provide a pointer to an allocated LZ4F_compressionContext_t object. - * If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation. - * Object can release its memory using LZ4F_freeCompressionContext(); -**/ -LZ4F_errorCode_t -LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned version) -{ - assert(LZ4F_compressionContextPtr != NULL); /* considered a violation of narrow contract */ - /* in case it nonetheless happen in production */ - RETURN_ERROR_IF(LZ4F_compressionContextPtr == NULL, parameter_null); - - *LZ4F_compressionContextPtr = LZ4F_createCompressionContext_advanced(LZ4F_defaultCMem, version); - RETURN_ERROR_IF(*LZ4F_compressionContextPtr==NULL, allocation_failed); - return LZ4F_OK_NoError; -} - - -LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr) -{ - if (cctxPtr != NULL) { /* support free on NULL */ - LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem); /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */ - LZ4F_free(cctxPtr->tmpBuff, cctxPtr->cmem); - LZ4F_free(cctxPtr, cctxPtr->cmem); - } - return LZ4F_OK_NoError; -} - - -/** - * This function prepares the internal LZ4(HC) stream for a new compression, - * resetting the context and attaching the dictionary, if there is one. - * - * It needs to be called at the beginning of each independent compression - * stream (i.e., at the beginning of a frame in blockLinked mode, or at the - * beginning of each block in blockIndependent mode). - */ -static void LZ4F_initStream(void* ctx, - const LZ4F_CDict* cdict, - int level, - LZ4F_blockMode_t blockMode) { - if (level < LZ4HC_CLEVEL_MIN) { - if (cdict != NULL || blockMode == LZ4F_blockLinked) { - /* In these cases, we will call LZ4_compress_fast_continue(), - * which needs an already reset context. Otherwise, we'll call a - * one-shot API. 
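The LZ4F_calloc()/LZ4F_malloc()/LZ4F_free() wrappers above route every allocation through an LZ4F_CustomMem, and LZ4F_createCompressionContext_advanced() lets callers supply one. A sketch of a counting allocator: the field names and call pattern follow the wrapper code above, but the include and the exact LZ4F_CustomMem declaration live in the static-linking-only part of the frame API, so treat those details as assumptions.

    #include <stdlib.h>
    #include "lz4frame_static.h"   /* LZ4F_CustomMem / *_advanced constructors are static-linking-only */

    /* wrappers matching the customAlloc/customFree call pattern used above */
    static void* counting_alloc(void* opaqueState, size_t size)
    {
        *(size_t*)opaqueState += size;
        return malloc(size);
    }
    static void counting_free(void* opaqueState, void* address)
    {
        (void)opaqueState;
        free(address);
    }

    static LZ4F_cctx* create_counting_cctx(size_t* totalAllocated)
    {
        LZ4F_CustomMem cmem;
        cmem.customAlloc  = counting_alloc;
        cmem.customCalloc = NULL;   /* NULL => library uses customAlloc + memset, per LZ4F_calloc() above */
        cmem.customFree   = counting_free;
        cmem.opaqueState  = totalAllocated;
        return LZ4F_createCompressionContext_advanced(cmem, LZ4F_VERSION);
    }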
The non-continued APIs internally perform their own - * resets at the beginning of their calls, where they know what - * tableType they need the context to be in. So in that case this - * would be misguided / wasted work. */ - LZ4_resetStream_fast((LZ4_stream_t*)ctx); - } - LZ4_attach_dictionary((LZ4_stream_t *)ctx, cdict ? cdict->fastCtx : NULL); - } else { - LZ4_resetStreamHC_fast((LZ4_streamHC_t*)ctx, level); - LZ4_attach_HC_dictionary((LZ4_streamHC_t *)ctx, cdict ? cdict->HCCtx : NULL); - } -} - -static int ctxTypeID_to_size(int ctxTypeID) { - switch(ctxTypeID) { - case 1: - return LZ4_sizeofState(); - case 2: - return LZ4_sizeofStateHC(); - default: - return 0; - } -} - -/*! LZ4F_compressBegin_usingCDict() : - * init streaming compression AND writes frame header into @dstBuffer. - * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. - * @return : number of bytes written into @dstBuffer for the header - * or an error code (can be tested using LZ4F_isError()) - */ -size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr, - void* dstBuffer, size_t dstCapacity, - const LZ4F_CDict* cdict, - const LZ4F_preferences_t* preferencesPtr) -{ - LZ4F_preferences_t const prefNull = LZ4F_INIT_PREFERENCES; - BYTE* const dstStart = (BYTE*)dstBuffer; - BYTE* dstPtr = dstStart; - - RETURN_ERROR_IF(dstCapacity < maxFHSize, dstMaxSize_tooSmall); - if (preferencesPtr == NULL) preferencesPtr = &prefNull; - cctxPtr->prefs = *preferencesPtr; - - /* cctx Management */ - { U16 const ctxTypeID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2; - int requiredSize = ctxTypeID_to_size(ctxTypeID); - int allocatedSize = ctxTypeID_to_size(cctxPtr->lz4CtxAlloc); - if (allocatedSize < requiredSize) { - /* not enough space allocated */ - LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem); - if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) { - /* must take ownership of memory allocation, - * in order to respect custom allocator contract */ - cctxPtr->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_stream_t), cctxPtr->cmem); - if (cctxPtr->lz4CtxPtr) - LZ4_initStream(cctxPtr->lz4CtxPtr, sizeof(LZ4_stream_t)); - } else { - cctxPtr->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_streamHC_t), cctxPtr->cmem); - if (cctxPtr->lz4CtxPtr) - LZ4_initStreamHC(cctxPtr->lz4CtxPtr, sizeof(LZ4_streamHC_t)); - } - RETURN_ERROR_IF(cctxPtr->lz4CtxPtr == NULL, allocation_failed); - cctxPtr->lz4CtxAlloc = ctxTypeID; - cctxPtr->lz4CtxState = ctxTypeID; - } else if (cctxPtr->lz4CtxState != ctxTypeID) { - /* otherwise, a sufficient buffer is already allocated, - * but we need to reset it to the correct context type */ - if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) { - LZ4_initStream((LZ4_stream_t*)cctxPtr->lz4CtxPtr, sizeof(LZ4_stream_t)); - } else { - LZ4_initStreamHC((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, sizeof(LZ4_streamHC_t)); - LZ4_setCompressionLevel((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel); - } - cctxPtr->lz4CtxState = ctxTypeID; - } } - - /* Buffer Management */ - if (cctxPtr->prefs.frameInfo.blockSizeID == 0) - cctxPtr->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT; - cctxPtr->maxBlockSize = LZ4F_getBlockSize(cctxPtr->prefs.frameInfo.blockSizeID); - - { size_t const requiredBuffSize = preferencesPtr->autoFlush ? - ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) : /* only needs past data up to window size */ - cctxPtr->maxBlockSize + ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 
128 KB : 0); - - if (cctxPtr->maxBufferSize < requiredBuffSize) { - cctxPtr->maxBufferSize = 0; - LZ4F_free(cctxPtr->tmpBuff, cctxPtr->cmem); - cctxPtr->tmpBuff = (BYTE*)LZ4F_calloc(requiredBuffSize, cctxPtr->cmem); - RETURN_ERROR_IF(cctxPtr->tmpBuff == NULL, allocation_failed); - cctxPtr->maxBufferSize = requiredBuffSize; - } } - cctxPtr->tmpIn = cctxPtr->tmpBuff; - cctxPtr->tmpInSize = 0; - (void)XXH32_reset(&(cctxPtr->xxh), 0); - - /* context init */ - cctxPtr->cdict = cdict; - if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) { - /* frame init only for blockLinked : blockIndependent will be init at each block */ - LZ4F_initStream(cctxPtr->lz4CtxPtr, cdict, cctxPtr->prefs.compressionLevel, LZ4F_blockLinked); - } - if (preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN) { - LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed); - } - - /* Magic Number */ - LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER); - dstPtr += 4; - { BYTE* const headerStart = dstPtr; - - /* FLG Byte */ - *dstPtr++ = (BYTE)(((1 & _2BITS) << 6) /* Version('01') */ - + ((cctxPtr->prefs.frameInfo.blockMode & _1BIT ) << 5) - + ((cctxPtr->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4) - + ((unsigned)(cctxPtr->prefs.frameInfo.contentSize > 0) << 3) - + ((cctxPtr->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2) - + (cctxPtr->prefs.frameInfo.dictID > 0) ); - /* BD Byte */ - *dstPtr++ = (BYTE)((cctxPtr->prefs.frameInfo.blockSizeID & _3BITS) << 4); - /* Optional Frame content size field */ - if (cctxPtr->prefs.frameInfo.contentSize) { - LZ4F_writeLE64(dstPtr, cctxPtr->prefs.frameInfo.contentSize); - dstPtr += 8; - cctxPtr->totalInSize = 0; - } - /* Optional dictionary ID field */ - if (cctxPtr->prefs.frameInfo.dictID) { - LZ4F_writeLE32(dstPtr, cctxPtr->prefs.frameInfo.dictID); - dstPtr += 4; - } - /* Header CRC Byte */ - *dstPtr = LZ4F_headerChecksum(headerStart, (size_t)(dstPtr - headerStart)); - dstPtr++; - } - - cctxPtr->cStage = 1; /* header written, now request input data block */ - return (size_t)(dstPtr - dstStart); -} - - -/*! LZ4F_compressBegin() : - * init streaming compression AND writes frame header into @dstBuffer. - * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. - * @preferencesPtr can be NULL, in which case default parameters are selected. - * @return : number of bytes written into dstBuffer for the header - * or an error code (can be tested using LZ4F_isError()) - */ -size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr, - void* dstBuffer, size_t dstCapacity, - const LZ4F_preferences_t* preferencesPtr) -{ - return LZ4F_compressBegin_usingCDict(cctxPtr, dstBuffer, dstCapacity, - NULL, preferencesPtr); -} - - -/* LZ4F_compressBound() : - * @return minimum capacity of dstBuffer for a given srcSize to handle worst case scenario. - * LZ4F_preferences_t structure is optional : if NULL, preferences will be set to cover worst case scenario. - * This function cannot fail. - */ -size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr) -{ - if (preferencesPtr && preferencesPtr->autoFlush) { - return LZ4F_compressBound_internal(srcSize, preferencesPtr, 0); - } - return LZ4F_compressBound_internal(srcSize, preferencesPtr, (size_t)-1); -} - - -typedef int (*compressFunc_t)(void* ctx, const char* src, char* dst, int srcSize, int dstSize, int level, const LZ4F_CDict* cdict); - - -/*! LZ4F_makeBlock(): - * compress a single block, add header and optional checksum. 
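As a worked example of the header layout written above (the byte values follow directly from the FLG/BD construction in LZ4F_compressBegin_usingCDict(); the checksum byte is left symbolic):

    /* With all-default preferences the 7-byte frame header is:
     *   04 22 4D 18   LZ4F_MAGICNUMBER (0x184D2204), little-endian
     *   40            FLG : version '01' in bits 7-6; block-independence,
     *                       block-checksum, content-size, content-checksum
     *                       and dictID bits all 0
     *   40            BD  : blockSizeID 4 (64 KB default) in bits 6-4
     *   HC            header checksum = (XXH32(FLG..BD, 0) >> 8) & 0xFF,
     *                       as computed by LZ4F_headerChecksum() above
     */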
- * assumption : dst buffer capacity is >= BHSize + srcSize + crcSize - */ -static size_t LZ4F_makeBlock(void* dst, - const void* src, size_t srcSize, - compressFunc_t compress, void* lz4ctx, int level, - const LZ4F_CDict* cdict, - LZ4F_blockChecksum_t crcFlag) -{ - BYTE* const cSizePtr = (BYTE*)dst; - U32 cSize; - assert(compress != NULL); - cSize = (U32)compress(lz4ctx, (const char*)src, (char*)(cSizePtr+BHSize), - (int)(srcSize), (int)(srcSize-1), - level, cdict); - - if (cSize == 0 || cSize >= srcSize) { - cSize = (U32)srcSize; - LZ4F_writeLE32(cSizePtr, cSize | LZ4F_BLOCKUNCOMPRESSED_FLAG); - memcpy(cSizePtr+BHSize, src, srcSize); - } else { - LZ4F_writeLE32(cSizePtr, cSize); - } - if (crcFlag) { - U32 const crc32 = XXH32(cSizePtr+BHSize, cSize, 0); /* checksum of compressed data */ - LZ4F_writeLE32(cSizePtr+BHSize+cSize, crc32); - } - return BHSize + cSize + ((U32)crcFlag)*BFSize; -} - - -static int LZ4F_compressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) -{ - int const acceleration = (level < 0) ? -level + 1 : 1; - DEBUGLOG(5, "LZ4F_compressBlock (srcSize=%i)", srcSize); - LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent); - if (cdict) { - return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration); - } else { - return LZ4_compress_fast_extState_fastReset(ctx, src, dst, srcSize, dstCapacity, acceleration); - } -} - -static int LZ4F_compressBlock_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) -{ - int const acceleration = (level < 0) ? -level + 1 : 1; - (void)cdict; /* init once at beginning of frame */ - DEBUGLOG(5, "LZ4F_compressBlock_continue (srcSize=%i)", srcSize); - return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration); -} - -static int LZ4F_compressBlockHC(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) -{ - LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent); - if (cdict) { - return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity); - } - return LZ4_compress_HC_extStateHC_fastReset(ctx, src, dst, srcSize, dstCapacity, level); -} - -static int LZ4F_compressBlockHC_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) -{ - (void)level; (void)cdict; /* init once at beginning of frame */ - return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity); -} - -static int LZ4F_doNotCompressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) -{ - (void)ctx; (void)src; (void)dst; (void)srcSize; (void)dstCapacity; (void)level; (void)cdict; - return 0; -} - -static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level, LZ4F_blockCompression_t compressMode) -{ - if (compressMode == LZ4B_UNCOMPRESSED) return LZ4F_doNotCompressBlock; - if (level < LZ4HC_CLEVEL_MIN) { - if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlock; - return LZ4F_compressBlock_continue; - } - if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlockHC; - return LZ4F_compressBlockHC_continue; -} - -/* Save history (up to 64KB) into @tmpBuff */ -static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr) -{ - if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) - return LZ4_saveDict ((LZ4_stream_t*)(cctxPtr->lz4CtxPtr), 
(char*)(cctxPtr->tmpBuff), 64 KB); - return LZ4_saveDictHC ((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB); -} - -typedef enum { notDone, fromTmpBuffer, fromSrcBuffer } LZ4F_lastBlockStatus; - -static const LZ4F_compressOptions_t k_cOptionsNull = { 0, { 0, 0, 0 } }; - - - /*! LZ4F_compressUpdateImpl() : - * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. - * When successful, the function always entirely consumes @srcBuffer. - * src data is either buffered or compressed into @dstBuffer. - * If the block compression does not match the compression of the previous block, the old data is flushed - * and operations continue with the new compression mode. - * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr) when block compression is turned on. - * @compressOptionsPtr is optional : provide NULL to mean "default". - * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered. - * or an error code if it fails (which can be tested using LZ4F_isError()) - * After an error, the state is left in a UB state, and must be re-initialized. - */ -static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr, - void* dstBuffer, size_t dstCapacity, - const void* srcBuffer, size_t srcSize, - const LZ4F_compressOptions_t* compressOptionsPtr, - LZ4F_blockCompression_t blockCompression) - { - size_t const blockSize = cctxPtr->maxBlockSize; - const BYTE* srcPtr = (const BYTE*)srcBuffer; - const BYTE* const srcEnd = srcPtr + srcSize; - BYTE* const dstStart = (BYTE*)dstBuffer; - BYTE* dstPtr = dstStart; - LZ4F_lastBlockStatus lastBlockCompressed = notDone; - compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, blockCompression); - size_t bytesWritten; - DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%zu)", srcSize); - - RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized); /* state must be initialized and waiting for next block */ - if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize)) - RETURN_ERROR(dstMaxSize_tooSmall); - - if (blockCompression == LZ4B_UNCOMPRESSED && dstCapacity < srcSize) - RETURN_ERROR(dstMaxSize_tooSmall); - - /* flush currently written block, to continue with new block compression */ - if (cctxPtr->blockCompression != blockCompression) { - bytesWritten = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr); - dstPtr += bytesWritten; - cctxPtr->blockCompression = blockCompression; - } - - if (compressOptionsPtr == NULL) compressOptionsPtr = &k_cOptionsNull; - - /* complete tmp buffer */ - if (cctxPtr->tmpInSize > 0) { /* some data already within tmp buffer */ - size_t const sizeToCopy = blockSize - cctxPtr->tmpInSize; - assert(blockSize > cctxPtr->tmpInSize); - if (sizeToCopy > srcSize) { - /* add src to tmpIn buffer */ - memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, srcSize); - srcPtr = srcEnd; - cctxPtr->tmpInSize += srcSize; - /* still needs some CRC */ - } else { - /* complete tmpIn block and then compress it */ - lastBlockCompressed = fromTmpBuffer; - memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, sizeToCopy); - srcPtr += sizeToCopy; - - dstPtr += LZ4F_makeBlock(dstPtr, - cctxPtr->tmpIn, blockSize, - compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel, - cctxPtr->cdict, - cctxPtr->prefs.frameInfo.blockChecksumFlag); - if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += blockSize; - 
cctxPtr->tmpInSize = 0; - } } - - while ((size_t)(srcEnd - srcPtr) >= blockSize) { - /* compress full blocks */ - lastBlockCompressed = fromSrcBuffer; - dstPtr += LZ4F_makeBlock(dstPtr, - srcPtr, blockSize, - compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel, - cctxPtr->cdict, - cctxPtr->prefs.frameInfo.blockChecksumFlag); - srcPtr += blockSize; - } - - if ((cctxPtr->prefs.autoFlush) && (srcPtr < srcEnd)) { - /* autoFlush : remaining input (< blockSize) is compressed */ - lastBlockCompressed = fromSrcBuffer; - dstPtr += LZ4F_makeBlock(dstPtr, - srcPtr, (size_t)(srcEnd - srcPtr), - compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel, - cctxPtr->cdict, - cctxPtr->prefs.frameInfo.blockChecksumFlag); - srcPtr = srcEnd; - } - - /* preserve dictionary within @tmpBuff whenever necessary */ - if ((cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) && (lastBlockCompressed==fromSrcBuffer)) { - /* linked blocks are only supported in compressed mode, see LZ4F_uncompressedUpdate */ - assert(blockCompression == LZ4B_COMPRESSED); - if (compressOptionsPtr->stableSrc) { - cctxPtr->tmpIn = cctxPtr->tmpBuff; /* src is stable : dictionary remains in src across invocations */ - } else { - int const realDictSize = LZ4F_localSaveDict(cctxPtr); - assert(0 <= realDictSize && realDictSize <= 64 KB); - cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize; - } - } - - /* keep tmpIn within limits */ - if (!(cctxPtr->prefs.autoFlush) /* no autoflush : there may be some data left within internal buffer */ - && (cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) ) /* not enough room to store next block */ - { - /* only preserve 64KB within internal buffer. Ensures there is enough room for next block. - * note: this situation necessarily implies lastBlockCompressed==fromTmpBuffer */ - int const realDictSize = LZ4F_localSaveDict(cctxPtr); - cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize; - assert((cctxPtr->tmpIn + blockSize) <= (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)); - } - - /* some input data left, necessarily < blockSize */ - if (srcPtr < srcEnd) { - /* fill tmp buffer */ - size_t const sizeToCopy = (size_t)(srcEnd - srcPtr); - memcpy(cctxPtr->tmpIn, srcPtr, sizeToCopy); - cctxPtr->tmpInSize = sizeToCopy; - } - - if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) - (void)XXH32_update(&(cctxPtr->xxh), srcBuffer, srcSize); - - cctxPtr->totalInSize += srcSize; - return (size_t)(dstPtr - dstStart); -} - -/*! LZ4F_compressUpdate() : - * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. - * When successful, the function always entirely consumes @srcBuffer. - * src data is either buffered or compressed into @dstBuffer. - * If previously an uncompressed block was written, buffered data is flushed - * before appending compressed data is continued. - * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr). - * @compressOptionsPtr is optional : provide NULL to mean "default". - * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered. - * or an error code if it fails (which can be tested using LZ4F_isError()) - * After an error, the state is left in a UB state, and must be re-initialized. 
- */ -size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr, - void* dstBuffer, size_t dstCapacity, - const void* srcBuffer, size_t srcSize, - const LZ4F_compressOptions_t* compressOptionsPtr) -{ - return LZ4F_compressUpdateImpl(cctxPtr, - dstBuffer, dstCapacity, - srcBuffer, srcSize, - compressOptionsPtr, LZ4B_COMPRESSED); -} - -/*! LZ4F_compressUpdate() : - * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. - * When successful, the function always entirely consumes @srcBuffer. - * src data is either buffered or compressed into @dstBuffer. - * If previously an uncompressed block was written, buffered data is flushed - * before appending compressed data is continued. - * This is only supported when LZ4F_blockIndependent is used - * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr). - * @compressOptionsPtr is optional : provide NULL to mean "default". - * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered. - * or an error code if it fails (which can be tested using LZ4F_isError()) - * After an error, the state is left in a UB state, and must be re-initialized. - */ -size_t LZ4F_uncompressedUpdate(LZ4F_cctx* cctxPtr, - void* dstBuffer, size_t dstCapacity, - const void* srcBuffer, size_t srcSize, - const LZ4F_compressOptions_t* compressOptionsPtr) { - RETURN_ERROR_IF(cctxPtr->prefs.frameInfo.blockMode != LZ4F_blockIndependent, blockMode_invalid); - return LZ4F_compressUpdateImpl(cctxPtr, - dstBuffer, dstCapacity, - srcBuffer, srcSize, - compressOptionsPtr, LZ4B_UNCOMPRESSED); -} - - -/*! LZ4F_flush() : - * When compressed data must be sent immediately, without waiting for a block to be filled, - * invoke LZ4_flush(), which will immediately compress any remaining data stored within LZ4F_cctx. - * The result of the function is the number of bytes written into dstBuffer. - * It can be zero, this means there was no data left within LZ4F_cctx. - * The function outputs an error code if it fails (can be tested using LZ4F_isError()) - * LZ4F_compressOptions_t* is optional. NULL is a valid argument. 
- */ -size_t LZ4F_flush(LZ4F_cctx* cctxPtr, - void* dstBuffer, size_t dstCapacity, - const LZ4F_compressOptions_t* compressOptionsPtr) -{ - BYTE* const dstStart = (BYTE*)dstBuffer; - BYTE* dstPtr = dstStart; - compressFunc_t compress; - - if (cctxPtr->tmpInSize == 0) return 0; /* nothing to flush */ - RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized); - RETURN_ERROR_IF(dstCapacity < (cctxPtr->tmpInSize + BHSize + BFSize), dstMaxSize_tooSmall); - (void)compressOptionsPtr; /* not useful (yet) */ - - /* select compression function */ - compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, cctxPtr->blockCompression); - - /* compress tmp buffer */ - dstPtr += LZ4F_makeBlock(dstPtr, - cctxPtr->tmpIn, cctxPtr->tmpInSize, - compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel, - cctxPtr->cdict, - cctxPtr->prefs.frameInfo.blockChecksumFlag); - assert(((void)"flush overflows dstBuffer!", (size_t)(dstPtr - dstStart) <= dstCapacity)); - - if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) - cctxPtr->tmpIn += cctxPtr->tmpInSize; - cctxPtr->tmpInSize = 0; - - /* keep tmpIn within limits */ - if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) { /* necessarily LZ4F_blockLinked */ - int const realDictSize = LZ4F_localSaveDict(cctxPtr); - cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize; - } - - return (size_t)(dstPtr - dstStart); -} - - -/*! LZ4F_compressEnd() : - * When you want to properly finish the compressed frame, just call LZ4F_compressEnd(). - * It will flush whatever data remained within compressionContext (like LZ4_flush()) - * but also properly finalize the frame, with an endMark and an (optional) checksum. - * LZ4F_compressOptions_t structure is optional : you can provide NULL as argument. - * @return: the number of bytes written into dstBuffer (necessarily >= 4 (endMark size)) - * or an error code if it fails (can be tested using LZ4F_isError()) - * The context can then be used again to compress a new frame, starting with LZ4F_compressBegin(). 
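Since the context becomes reusable after LZ4F_compressEnd(), back-to-back frames can share one cctx. A small sketch under that assumption (illustrative only; LZ4F_compressFrameBound() is used as the total output bound because no flush happens inside a frame, per its note in lz4frame.h):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "lz4frame.h"

/* Illustrative sketch: one cctx producing two consecutive frames. */
static size_t one_frame(LZ4F_cctx* cctx, const char* msg,
                        char* dst, size_t dstCap, const LZ4F_preferences_t* prefs)
{
    size_t pos, n;
    pos = LZ4F_compressBegin(cctx, dst, dstCap, prefs);
    if (LZ4F_isError(pos)) return pos;
    n = LZ4F_compressUpdate(cctx, dst + pos, dstCap - pos, msg, strlen(msg), NULL);
    if (LZ4F_isError(n)) return n;
    pos += n;
    n = LZ4F_compressEnd(cctx, dst + pos, dstCap - pos, NULL);  /* cctx reusable afterwards */
    if (LZ4F_isError(n)) return n;
    return pos + n;
}

int main(void)
{
    const char* msgs[2] = { "first frame", "second frame, same context" };
    LZ4F_preferences_t prefs = LZ4F_INIT_PREFERENCES;
    LZ4F_cctx* cctx = NULL;
    int i;
    if (LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) return 1;
    for (i = 0; i < 2; i++) {
        size_t const cap = LZ4F_compressFrameBound(strlen(msgs[i]), &prefs);
        char* const dst = (char*)malloc(cap);
        if (dst != NULL) {
            size_t const written = one_frame(cctx, msgs[i], dst, cap, &prefs);
            if (!LZ4F_isError(written))
                printf("frame %d: %u bytes\n", i, (unsigned)written);
            free(dst);
        }
    }
    LZ4F_freeCompressionContext(cctx);
    return 0;
}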
- */ -size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr, - void* dstBuffer, size_t dstCapacity, - const LZ4F_compressOptions_t* compressOptionsPtr) -{ - BYTE* const dstStart = (BYTE*)dstBuffer; - BYTE* dstPtr = dstStart; - - size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr); - DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity); - FORWARD_IF_ERROR(flushSize); - dstPtr += flushSize; - - assert(flushSize <= dstCapacity); - dstCapacity -= flushSize; - - RETURN_ERROR_IF(dstCapacity < 4, dstMaxSize_tooSmall); - LZ4F_writeLE32(dstPtr, 0); - dstPtr += 4; /* endMark */ - - if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) { - U32 const xxh = XXH32_digest(&(cctxPtr->xxh)); - RETURN_ERROR_IF(dstCapacity < 8, dstMaxSize_tooSmall); - DEBUGLOG(5,"Writing 32-bit content checksum"); - LZ4F_writeLE32(dstPtr, xxh); - dstPtr+=4; /* content Checksum */ - } - - cctxPtr->cStage = 0; /* state is now re-usable (with identical preferences) */ - cctxPtr->maxBufferSize = 0; /* reuse HC context */ - - if (cctxPtr->prefs.frameInfo.contentSize) { - if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize) - RETURN_ERROR(frameSize_wrong); - } - - return (size_t)(dstPtr - dstStart); -} - - -/*-*************************************************** -* Frame Decompression -*****************************************************/ - -typedef enum { - dstage_getFrameHeader=0, dstage_storeFrameHeader, - dstage_init, - dstage_getBlockHeader, dstage_storeBlockHeader, - dstage_copyDirect, dstage_getBlockChecksum, - dstage_getCBlock, dstage_storeCBlock, - dstage_flushOut, - dstage_getSuffix, dstage_storeSuffix, - dstage_getSFrameSize, dstage_storeSFrameSize, - dstage_skipSkippable -} dStage_t; - -struct LZ4F_dctx_s { - LZ4F_CustomMem cmem; - LZ4F_frameInfo_t frameInfo; - U32 version; - dStage_t dStage; - U64 frameRemainingSize; - size_t maxBlockSize; - size_t maxBufferSize; - BYTE* tmpIn; - size_t tmpInSize; - size_t tmpInTarget; - BYTE* tmpOutBuffer; - const BYTE* dict; - size_t dictSize; - BYTE* tmpOut; - size_t tmpOutSize; - size_t tmpOutStart; - XXH32_state_t xxh; - XXH32_state_t blockChecksum; - int skipChecksum; - BYTE header[LZ4F_HEADER_SIZE_MAX]; -}; /* typedef'd to LZ4F_dctx in lz4frame.h */ - - -LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version) -{ - LZ4F_dctx* const dctx = (LZ4F_dctx*)LZ4F_calloc(sizeof(LZ4F_dctx), customMem); - if (dctx == NULL) return NULL; - - dctx->cmem = customMem; - dctx->version = version; - return dctx; -} - -/*! LZ4F_createDecompressionContext() : - * Create a decompressionContext object, which will track all decompression operations. - * Provides a pointer to a fully allocated and initialized LZ4F_decompressionContext object. - * Object can later be released using LZ4F_freeDecompressionContext(). - * @return : if != 0, there was an error during context creation. 
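A minimal lifecycle sketch for the decompression context (illustrative, not part of the removed file); note that LZ4F_freeDecompressionContext() reports the stage the context was in, 0 meaning the previous frame was decoded completely:

#include <stdio.h>
#include "lz4frame.h"

/* Illustrative sketch of the dctx lifecycle: creation returns an error code
 * (non-zero on failure); releasing the context reports its final stage. */
int main(void)
{
    LZ4F_dctx* dctx = NULL;
    LZ4F_errorCode_t const err = LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION);
    if (LZ4F_isError(err)) {
        printf("dctx creation failed: %s\n", LZ4F_getErrorName(err));
        return 1;
    }
    /* ... feed the frame through LZ4F_decompress() here ... */
    {   LZ4F_errorCode_t const finalStage = LZ4F_freeDecompressionContext(dctx);
        printf("context released, final stage = %u\n", (unsigned)finalStage);
    }
    return 0;
}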
- */ -LZ4F_errorCode_t -LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber) -{ - assert(LZ4F_decompressionContextPtr != NULL); /* violation of narrow contract */ - RETURN_ERROR_IF(LZ4F_decompressionContextPtr == NULL, parameter_null); /* in case it nonetheless happen in production */ - - *LZ4F_decompressionContextPtr = LZ4F_createDecompressionContext_advanced(LZ4F_defaultCMem, versionNumber); - if (*LZ4F_decompressionContextPtr == NULL) { /* failed allocation */ - RETURN_ERROR(allocation_failed); - } - return LZ4F_OK_NoError; -} - -LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx) -{ - LZ4F_errorCode_t result = LZ4F_OK_NoError; - if (dctx != NULL) { /* can accept NULL input, like free() */ - result = (LZ4F_errorCode_t)dctx->dStage; - LZ4F_free(dctx->tmpIn, dctx->cmem); - LZ4F_free(dctx->tmpOutBuffer, dctx->cmem); - LZ4F_free(dctx, dctx->cmem); - } - return result; -} - - -/*==--- Streaming Decompression operations ---==*/ - -void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx) -{ - dctx->dStage = dstage_getFrameHeader; - dctx->dict = NULL; - dctx->dictSize = 0; - dctx->skipChecksum = 0; -} - - -/*! LZ4F_decodeHeader() : - * input : `src` points at the **beginning of the frame** - * output : set internal values of dctx, such as - * dctx->frameInfo and dctx->dStage. - * Also allocates internal buffers. - * @return : nb Bytes read from src (necessarily <= srcSize) - * or an error code (testable with LZ4F_isError()) - */ -static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize) -{ - unsigned blockMode, blockChecksumFlag, contentSizeFlag, contentChecksumFlag, dictIDFlag, blockSizeID; - size_t frameHeaderSize; - const BYTE* srcPtr = (const BYTE*)src; - - DEBUGLOG(5, "LZ4F_decodeHeader"); - /* need to decode header to get frameInfo */ - RETURN_ERROR_IF(srcSize < minFHSize, frameHeader_incomplete); /* minimal frame header size */ - MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo)); - - /* special case : skippable frames */ - if ((LZ4F_readLE32(srcPtr) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) { - dctx->frameInfo.frameType = LZ4F_skippableFrame; - if (src == (void*)(dctx->header)) { - dctx->tmpInSize = srcSize; - dctx->tmpInTarget = 8; - dctx->dStage = dstage_storeSFrameSize; - return srcSize; - } else { - dctx->dStage = dstage_getSFrameSize; - return 4; - } } - - /* control magic number */ -#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) { - DEBUGLOG(4, "frame header error : unknown magic number"); - RETURN_ERROR(frameType_unknown); - } -#endif - dctx->frameInfo.frameType = LZ4F_frame; - - /* Flags */ - { U32 const FLG = srcPtr[4]; - U32 const version = (FLG>>6) & _2BITS; - blockChecksumFlag = (FLG>>4) & _1BIT; - blockMode = (FLG>>5) & _1BIT; - contentSizeFlag = (FLG>>3) & _1BIT; - contentChecksumFlag = (FLG>>2) & _1BIT; - dictIDFlag = FLG & _1BIT; - /* validate */ - if (((FLG>>1)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */ - if (version != 1) RETURN_ERROR(headerVersion_wrong); /* Version Number, only supported value */ - } - - /* Frame Header Size */ - frameHeaderSize = minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0); - - if (srcSize < frameHeaderSize) { - /* not enough input to fully decode frame header */ - if (srcPtr != dctx->header) - memcpy(dctx->header, srcPtr, srcSize); - dctx->tmpInSize = srcSize; - dctx->tmpInTarget = frameHeaderSize; - dctx->dStage = dstage_storeFrameHeader; - return srcSize; - } - - { U32 const BD = 
srcPtr[5]; - blockSizeID = (BD>>4) & _3BITS; - /* validate */ - if (((BD>>7)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */ - if (blockSizeID < 4) RETURN_ERROR(maxBlockSize_invalid); /* 4-7 only supported values for the time being */ - if (((BD>>0)&_4BITS) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bits */ - } - - /* check header */ - assert(frameHeaderSize > 5); -#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - { BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5); - RETURN_ERROR_IF(HC != srcPtr[frameHeaderSize-1], headerChecksum_invalid); - } -#endif - - /* save */ - dctx->frameInfo.blockMode = (LZ4F_blockMode_t)blockMode; - dctx->frameInfo.blockChecksumFlag = (LZ4F_blockChecksum_t)blockChecksumFlag; - dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag; - dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID; - dctx->maxBlockSize = LZ4F_getBlockSize((LZ4F_blockSizeID_t)blockSizeID); - if (contentSizeFlag) - dctx->frameRemainingSize = dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6); - if (dictIDFlag) - dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5); - - dctx->dStage = dstage_init; - - return frameHeaderSize; -} - - -/*! LZ4F_headerSize() : - * @return : size of frame header - * or an error code, which can be tested using LZ4F_isError() - */ -size_t LZ4F_headerSize(const void* src, size_t srcSize) -{ - RETURN_ERROR_IF(src == NULL, srcPtr_wrong); - - /* minimal srcSize to determine header size */ - if (srcSize < LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH) - RETURN_ERROR(frameHeader_incomplete); - - /* special case : skippable frames */ - if ((LZ4F_readLE32(src) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) - return 8; - - /* control magic number */ -#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER) - RETURN_ERROR(frameType_unknown); -#endif - - /* Frame Header Size */ - { BYTE const FLG = ((const BYTE*)src)[4]; - U32 const contentSizeFlag = (FLG>>3) & _1BIT; - U32 const dictIDFlag = FLG & _1BIT; - return minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0); - } -} - -/*! LZ4F_getFrameInfo() : - * This function extracts frame parameters (max blockSize, frame checksum, etc.). - * Usage is optional. Objective is to provide relevant information for allocation purposes. - * This function works in 2 situations : - * - At the beginning of a new frame, in which case it will decode this information from `srcBuffer`, and start the decoding process. - * Amount of input data provided must be large enough to successfully decode the frame header. - * A header size is variable, but is guaranteed to be <= LZ4F_HEADER_SIZE_MAX bytes. It's possible to provide more input data than this minimum. - * - After decoding has been started. In which case, no input is read, frame parameters are extracted from dctx. - * The number of bytes consumed from srcBuffer will be updated within *srcSizePtr (necessarily <= original value). - * Decompression must resume from (srcBuffer + *srcSizePtr). - * @return : an hint about how many srcSize bytes LZ4F_decompress() expects for next call, - * or an error code which can be tested using LZ4F_isError() - * note 1 : in case of error, dctx is not modified. Decoding operations can resume from where they stopped. - * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure. 
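The allocation-oriented use of LZ4F_getFrameInfo() described above can look like the following sketch (illustrative only; the 256 KB fallback for an undeclared contentSize is an arbitrary choice, and src is assumed to hold at least the full frame header, e.g. LZ4F_HEADER_SIZE_MAX bytes):

#include <stdlib.h>
#include "lz4frame.h"

/* Illustrative sketch: read the frame header once to size the output buffer,
 * then resume decoding from src + *consumed. */
static void* alloc_for_frame(LZ4F_dctx* dctx, const void* src, size_t srcSize,
                             size_t* consumed, size_t* dstCap)
{
    LZ4F_frameInfo_t info = LZ4F_INIT_FRAMEINFO;
    size_t inSize = srcSize;
    size_t const hint = LZ4F_getFrameInfo(dctx, &info, src, &inSize);
    if (LZ4F_isError(hint)) return NULL;
    *consumed = inSize;                                   /* header bytes eaten so far */
    *dstCap = info.contentSize ? (size_t)info.contentSize /* exact size when declared */
                               : (size_t)(256 * 1024);    /* otherwise decode in chunks */
    return malloc(*dstCap);
}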
- */ -LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx, - LZ4F_frameInfo_t* frameInfoPtr, - const void* srcBuffer, size_t* srcSizePtr) -{ - LZ4F_STATIC_ASSERT(dstage_getFrameHeader < dstage_storeFrameHeader); - if (dctx->dStage > dstage_storeFrameHeader) { - /* frameInfo already decoded */ - size_t o=0, i=0; - *srcSizePtr = 0; - *frameInfoPtr = dctx->frameInfo; - /* returns : recommended nb of bytes for LZ4F_decompress() */ - return LZ4F_decompress(dctx, NULL, &o, NULL, &i, NULL); - } else { - if (dctx->dStage == dstage_storeFrameHeader) { - /* frame decoding already started, in the middle of header => automatic fail */ - *srcSizePtr = 0; - RETURN_ERROR(frameDecoding_alreadyStarted); - } else { - size_t const hSize = LZ4F_headerSize(srcBuffer, *srcSizePtr); - if (LZ4F_isError(hSize)) { *srcSizePtr=0; return hSize; } - if (*srcSizePtr < hSize) { - *srcSizePtr=0; - RETURN_ERROR(frameHeader_incomplete); - } - - { size_t decodeResult = LZ4F_decodeHeader(dctx, srcBuffer, hSize); - if (LZ4F_isError(decodeResult)) { - *srcSizePtr = 0; - } else { - *srcSizePtr = decodeResult; - decodeResult = BHSize; /* block header size */ - } - *frameInfoPtr = dctx->frameInfo; - return decodeResult; - } } } -} - - -/* LZ4F_updateDict() : - * only used for LZ4F_blockLinked mode - * Condition : @dstPtr != NULL - */ -static void LZ4F_updateDict(LZ4F_dctx* dctx, - const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart, - unsigned withinTmp) -{ - assert(dstPtr != NULL); - if (dctx->dictSize==0) dctx->dict = (const BYTE*)dstPtr; /* will lead to prefix mode */ - assert(dctx->dict != NULL); - - if (dctx->dict + dctx->dictSize == dstPtr) { /* prefix mode, everything within dstBuffer */ - dctx->dictSize += dstSize; - return; - } - - assert(dstPtr >= dstBufferStart); - if ((size_t)(dstPtr - dstBufferStart) + dstSize >= 64 KB) { /* history in dstBuffer becomes large enough to become dictionary */ - dctx->dict = (const BYTE*)dstBufferStart; - dctx->dictSize = (size_t)(dstPtr - dstBufferStart) + dstSize; - return; - } - - assert(dstSize < 64 KB); /* if dstSize >= 64 KB, dictionary would be set into dstBuffer directly */ - - /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOutBuffer */ - assert(dctx->tmpOutBuffer != NULL); - - if (withinTmp && (dctx->dict == dctx->tmpOutBuffer)) { /* continue history within tmpOutBuffer */ - /* withinTmp expectation : content of [dstPtr,dstSize] is same as [dict+dictSize,dstSize], so we just extend it */ - assert(dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart); - dctx->dictSize += dstSize; - return; - } - - if (withinTmp) { /* copy relevant dict portion in front of tmpOut within tmpOutBuffer */ - size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer); - size_t copySize = 64 KB - dctx->tmpOutSize; - const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart; - if (dctx->tmpOutSize > 64 KB) copySize = 0; - if (copySize > preserveSize) copySize = preserveSize; - - memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize); - - dctx->dict = dctx->tmpOutBuffer; - dctx->dictSize = preserveSize + dctx->tmpOutStart + dstSize; - return; - } - - if (dctx->dict == dctx->tmpOutBuffer) { /* copy dst into tmp to complete dict */ - if (dctx->dictSize + dstSize > dctx->maxBufferSize) { /* tmp buffer not large enough */ - size_t const preserveSize = 64 KB - dstSize; - memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize); - dctx->dictSize = 
preserveSize; - } - memcpy(dctx->tmpOutBuffer + dctx->dictSize, dstPtr, dstSize); - dctx->dictSize += dstSize; - return; - } - - /* join dict & dest into tmp */ - { size_t preserveSize = 64 KB - dstSize; - if (preserveSize > dctx->dictSize) preserveSize = dctx->dictSize; - memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize); - memcpy(dctx->tmpOutBuffer + preserveSize, dstPtr, dstSize); - dctx->dict = dctx->tmpOutBuffer; - dctx->dictSize = preserveSize + dstSize; - } -} - - -/*! LZ4F_decompress() : - * Call this function repetitively to regenerate compressed data in srcBuffer. - * The function will attempt to decode up to *srcSizePtr bytes from srcBuffer - * into dstBuffer of capacity *dstSizePtr. - * - * The number of bytes regenerated into dstBuffer will be provided within *dstSizePtr (necessarily <= original value). - * - * The number of bytes effectively read from srcBuffer will be provided within *srcSizePtr (necessarily <= original value). - * If number of bytes read is < number of bytes provided, then decompression operation is not complete. - * Remaining data will have to be presented again in a subsequent invocation. - * - * The function result is an hint of the better srcSize to use for next call to LZ4F_decompress. - * Schematically, it's the size of the current (or remaining) compressed block + header of next block. - * Respecting the hint provides a small boost to performance, since it allows less buffer shuffling. - * Note that this is just a hint, and it's always possible to any srcSize value. - * When a frame is fully decoded, @return will be 0. - * If decompression failed, @return is an error code which can be tested using LZ4F_isError(). - */ -size_t LZ4F_decompress(LZ4F_dctx* dctx, - void* dstBuffer, size_t* dstSizePtr, - const void* srcBuffer, size_t* srcSizePtr, - const LZ4F_decompressOptions_t* decompressOptionsPtr) -{ - LZ4F_decompressOptions_t optionsNull; - const BYTE* const srcStart = (const BYTE*)srcBuffer; - const BYTE* const srcEnd = srcStart + *srcSizePtr; - const BYTE* srcPtr = srcStart; - BYTE* const dstStart = (BYTE*)dstBuffer; - BYTE* const dstEnd = dstStart ? 
dstStart + *dstSizePtr : NULL; - BYTE* dstPtr = dstStart; - const BYTE* selectedIn = NULL; - unsigned doAnotherStage = 1; - size_t nextSrcSizeHint = 1; - - - DEBUGLOG(5, "LZ4F_decompress : %p,%u => %p,%u", - srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr); - if (dstBuffer == NULL) assert(*dstSizePtr == 0); - MEM_INIT(&optionsNull, 0, sizeof(optionsNull)); - if (decompressOptionsPtr==NULL) decompressOptionsPtr = &optionsNull; - *srcSizePtr = 0; - *dstSizePtr = 0; - assert(dctx != NULL); - dctx->skipChecksum |= (decompressOptionsPtr->skipChecksums != 0); /* once set, disable for the remainder of the frame */ - - /* behaves as a state machine */ - - while (doAnotherStage) { - - switch(dctx->dStage) - { - - case dstage_getFrameHeader: - DEBUGLOG(6, "dstage_getFrameHeader"); - if ((size_t)(srcEnd-srcPtr) >= maxFHSize) { /* enough to decode - shortcut */ - size_t const hSize = LZ4F_decodeHeader(dctx, srcPtr, (size_t)(srcEnd-srcPtr)); /* will update dStage appropriately */ - FORWARD_IF_ERROR(hSize); - srcPtr += hSize; - break; - } - dctx->tmpInSize = 0; - if (srcEnd-srcPtr == 0) return minFHSize; /* 0-size input */ - dctx->tmpInTarget = minFHSize; /* minimum size to decode header */ - dctx->dStage = dstage_storeFrameHeader; - /* fall-through */ - - case dstage_storeFrameHeader: - DEBUGLOG(6, "dstage_storeFrameHeader"); - { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, (size_t)(srcEnd - srcPtr)); - memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy); - dctx->tmpInSize += sizeToCopy; - srcPtr += sizeToCopy; - } - if (dctx->tmpInSize < dctx->tmpInTarget) { - nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) + BHSize; /* rest of header + nextBlockHeader */ - doAnotherStage = 0; /* not enough src data, ask for some more */ - break; - } - FORWARD_IF_ERROR( LZ4F_decodeHeader(dctx, dctx->header, dctx->tmpInTarget) ); /* will update dStage appropriately */ - break; - - case dstage_init: - DEBUGLOG(6, "dstage_init"); - if (dctx->frameInfo.contentChecksumFlag) (void)XXH32_reset(&(dctx->xxh), 0); - /* internal buffers allocation */ - { size_t const bufferNeeded = dctx->maxBlockSize - + ((dctx->frameInfo.blockMode==LZ4F_blockLinked) ? 
128 KB : 0); - if (bufferNeeded > dctx->maxBufferSize) { /* tmp buffers too small */ - dctx->maxBufferSize = 0; /* ensure allocation will be re-attempted on next entry*/ - LZ4F_free(dctx->tmpIn, dctx->cmem); - dctx->tmpIn = (BYTE*)LZ4F_malloc(dctx->maxBlockSize + BFSize /* block checksum */, dctx->cmem); - RETURN_ERROR_IF(dctx->tmpIn == NULL, allocation_failed); - LZ4F_free(dctx->tmpOutBuffer, dctx->cmem); - dctx->tmpOutBuffer= (BYTE*)LZ4F_malloc(bufferNeeded, dctx->cmem); - RETURN_ERROR_IF(dctx->tmpOutBuffer== NULL, allocation_failed); - dctx->maxBufferSize = bufferNeeded; - } } - dctx->tmpInSize = 0; - dctx->tmpInTarget = 0; - dctx->tmpOut = dctx->tmpOutBuffer; - dctx->tmpOutStart = 0; - dctx->tmpOutSize = 0; - - dctx->dStage = dstage_getBlockHeader; - /* fall-through */ - - case dstage_getBlockHeader: - if ((size_t)(srcEnd - srcPtr) >= BHSize) { - selectedIn = srcPtr; - srcPtr += BHSize; - } else { - /* not enough input to read cBlockSize field */ - dctx->tmpInSize = 0; - dctx->dStage = dstage_storeBlockHeader; - } - - if (dctx->dStage == dstage_storeBlockHeader) /* can be skipped */ - case dstage_storeBlockHeader: - { size_t const remainingInput = (size_t)(srcEnd - srcPtr); - size_t const wantedData = BHSize - dctx->tmpInSize; - size_t const sizeToCopy = MIN(wantedData, remainingInput); - memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy); - srcPtr += sizeToCopy; - dctx->tmpInSize += sizeToCopy; - - if (dctx->tmpInSize < BHSize) { /* not enough input for cBlockSize */ - nextSrcSizeHint = BHSize - dctx->tmpInSize; - doAnotherStage = 0; - break; - } - selectedIn = dctx->tmpIn; - } /* if (dctx->dStage == dstage_storeBlockHeader) */ - - /* decode block header */ - { U32 const blockHeader = LZ4F_readLE32(selectedIn); - size_t const nextCBlockSize = blockHeader & 0x7FFFFFFFU; - size_t const crcSize = dctx->frameInfo.blockChecksumFlag * BFSize; - if (blockHeader==0) { /* frameEnd signal, no more block */ - DEBUGLOG(5, "end of frame"); - dctx->dStage = dstage_getSuffix; - break; - } - if (nextCBlockSize > dctx->maxBlockSize) { - RETURN_ERROR(maxBlockSize_invalid); - } - if (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG) { - /* next block is uncompressed */ - dctx->tmpInTarget = nextCBlockSize; - DEBUGLOG(5, "next block is uncompressed (size %u)", (U32)nextCBlockSize); - if (dctx->frameInfo.blockChecksumFlag) { - (void)XXH32_reset(&dctx->blockChecksum, 0); - } - dctx->dStage = dstage_copyDirect; - break; - } - /* next block is a compressed block */ - dctx->tmpInTarget = nextCBlockSize + crcSize; - dctx->dStage = dstage_getCBlock; - if (dstPtr==dstEnd || srcPtr==srcEnd) { - nextSrcSizeHint = BHSize + nextCBlockSize + crcSize; - doAnotherStage = 0; - } - break; - } - - case dstage_copyDirect: /* uncompressed block */ - DEBUGLOG(6, "dstage_copyDirect"); - { size_t sizeToCopy; - if (dstPtr == NULL) { - sizeToCopy = 0; - } else { - size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr)); - sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize); - memcpy(dstPtr, srcPtr, sizeToCopy); - if (!dctx->skipChecksum) { - if (dctx->frameInfo.blockChecksumFlag) { - (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy); - } - if (dctx->frameInfo.contentChecksumFlag) - (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy); - } - if (dctx->frameInfo.contentSize) - dctx->frameRemainingSize -= sizeToCopy; - - /* history management (linked blocks only)*/ - if (dctx->frameInfo.blockMode == LZ4F_blockLinked) { - LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0); - } } - - srcPtr += 
sizeToCopy; - dstPtr += sizeToCopy; - if (sizeToCopy == dctx->tmpInTarget) { /* all done */ - if (dctx->frameInfo.blockChecksumFlag) { - dctx->tmpInSize = 0; - dctx->dStage = dstage_getBlockChecksum; - } else - dctx->dStage = dstage_getBlockHeader; /* new block */ - break; - } - dctx->tmpInTarget -= sizeToCopy; /* need to copy more */ - } - nextSrcSizeHint = dctx->tmpInTarget + - +(dctx->frameInfo.blockChecksumFlag ? BFSize : 0) - + BHSize /* next header size */; - doAnotherStage = 0; - break; - - /* check block checksum for recently transferred uncompressed block */ - case dstage_getBlockChecksum: - DEBUGLOG(6, "dstage_getBlockChecksum"); - { const void* crcSrc; - if ((srcEnd-srcPtr >= 4) && (dctx->tmpInSize==0)) { - crcSrc = srcPtr; - srcPtr += 4; - } else { - size_t const stillToCopy = 4 - dctx->tmpInSize; - size_t const sizeToCopy = MIN(stillToCopy, (size_t)(srcEnd-srcPtr)); - memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy); - dctx->tmpInSize += sizeToCopy; - srcPtr += sizeToCopy; - if (dctx->tmpInSize < 4) { /* all input consumed */ - doAnotherStage = 0; - break; - } - crcSrc = dctx->header; - } - if (!dctx->skipChecksum) { - U32 const readCRC = LZ4F_readLE32(crcSrc); - U32 const calcCRC = XXH32_digest(&dctx->blockChecksum); -#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - DEBUGLOG(6, "compare block checksum"); - if (readCRC != calcCRC) { - DEBUGLOG(4, "incorrect block checksum: %08X != %08X", - readCRC, calcCRC); - RETURN_ERROR(blockChecksum_invalid); - } -#else - (void)readCRC; - (void)calcCRC; -#endif - } } - dctx->dStage = dstage_getBlockHeader; /* new block */ - break; - - case dstage_getCBlock: - DEBUGLOG(6, "dstage_getCBlock"); - if ((size_t)(srcEnd-srcPtr) < dctx->tmpInTarget) { - dctx->tmpInSize = 0; - dctx->dStage = dstage_storeCBlock; - break; - } - /* input large enough to read full block directly */ - selectedIn = srcPtr; - srcPtr += dctx->tmpInTarget; - - if (0) /* always jump over next block */ - case dstage_storeCBlock: - { size_t const wantedData = dctx->tmpInTarget - dctx->tmpInSize; - size_t const inputLeft = (size_t)(srcEnd-srcPtr); - size_t const sizeToCopy = MIN(wantedData, inputLeft); - memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy); - dctx->tmpInSize += sizeToCopy; - srcPtr += sizeToCopy; - if (dctx->tmpInSize < dctx->tmpInTarget) { /* need more input */ - nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) - + (dctx->frameInfo.blockChecksumFlag ? 
BFSize : 0) - + BHSize /* next header size */; - doAnotherStage = 0; - break; - } - selectedIn = dctx->tmpIn; - } - - /* At this stage, input is large enough to decode a block */ - - /* First, decode and control block checksum if it exists */ - if (dctx->frameInfo.blockChecksumFlag) { - assert(dctx->tmpInTarget >= 4); - dctx->tmpInTarget -= 4; - assert(selectedIn != NULL); /* selectedIn is defined at this stage (either srcPtr, or dctx->tmpIn) */ - { U32 const readBlockCrc = LZ4F_readLE32(selectedIn + dctx->tmpInTarget); - U32 const calcBlockCrc = XXH32(selectedIn, dctx->tmpInTarget, 0); -#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - RETURN_ERROR_IF(readBlockCrc != calcBlockCrc, blockChecksum_invalid); -#else - (void)readBlockCrc; - (void)calcBlockCrc; -#endif - } } - - /* decode directly into destination buffer if there is enough room */ - if ( ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize) - /* unless the dictionary is stored in tmpOut: - * in which case it's faster to decode within tmpOut - * to benefit from prefix speedup */ - && !(dctx->dict!= NULL && (const BYTE*)dctx->dict + dctx->dictSize == dctx->tmpOut) ) - { - const char* dict = (const char*)dctx->dict; - size_t dictSize = dctx->dictSize; - int decodedSize; - assert(dstPtr != NULL); - if (dict && dictSize > 1 GB) { - /* overflow control : dctx->dictSize is an int, avoid truncation / sign issues */ - dict += dictSize - 64 KB; - dictSize = 64 KB; - } - decodedSize = LZ4_decompress_safe_usingDict( - (const char*)selectedIn, (char*)dstPtr, - (int)dctx->tmpInTarget, (int)dctx->maxBlockSize, - dict, (int)dictSize); - RETURN_ERROR_IF(decodedSize < 0, decompressionFailed); - if ((dctx->frameInfo.contentChecksumFlag) && (!dctx->skipChecksum)) - XXH32_update(&(dctx->xxh), dstPtr, (size_t)decodedSize); - if (dctx->frameInfo.contentSize) - dctx->frameRemainingSize -= (size_t)decodedSize; - - /* dictionary management */ - if (dctx->frameInfo.blockMode==LZ4F_blockLinked) { - LZ4F_updateDict(dctx, dstPtr, (size_t)decodedSize, dstStart, 0); - } - - dstPtr += decodedSize; - dctx->dStage = dstage_getBlockHeader; /* end of block, let's get another one */ - break; - } - - /* not enough place into dst : decode into tmpOut */ - - /* manage dictionary */ - if (dctx->frameInfo.blockMode == LZ4F_blockLinked) { - if (dctx->dict == dctx->tmpOutBuffer) { - /* truncate dictionary to 64 KB if too big */ - if (dctx->dictSize > 128 KB) { - memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - 64 KB, 64 KB); - dctx->dictSize = 64 KB; - } - dctx->tmpOut = dctx->tmpOutBuffer + dctx->dictSize; - } else { /* dict not within tmpOut */ - size_t const reservedDictSpace = MIN(dctx->dictSize, 64 KB); - dctx->tmpOut = dctx->tmpOutBuffer + reservedDictSpace; - } } - - /* Decode block into tmpOut */ - { const char* dict = (const char*)dctx->dict; - size_t dictSize = dctx->dictSize; - int decodedSize; - if (dict && dictSize > 1 GB) { - /* the dictSize param is an int, avoid truncation / sign issues */ - dict += dictSize - 64 KB; - dictSize = 64 KB; - } - decodedSize = LZ4_decompress_safe_usingDict( - (const char*)selectedIn, (char*)dctx->tmpOut, - (int)dctx->tmpInTarget, (int)dctx->maxBlockSize, - dict, (int)dictSize); - RETURN_ERROR_IF(decodedSize < 0, decompressionFailed); - if (dctx->frameInfo.contentChecksumFlag && !dctx->skipChecksum) - XXH32_update(&(dctx->xxh), dctx->tmpOut, (size_t)decodedSize); - if (dctx->frameInfo.contentSize) - dctx->frameRemainingSize -= (size_t)decodedSize; - dctx->tmpOutSize = (size_t)decodedSize; - dctx->tmpOutStart = 0; - 
dctx->dStage = dstage_flushOut; - } - /* fall-through */ - - case dstage_flushOut: /* flush decoded data from tmpOut to dstBuffer */ - DEBUGLOG(6, "dstage_flushOut"); - if (dstPtr != NULL) { - size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr)); - memcpy(dstPtr, dctx->tmpOut + dctx->tmpOutStart, sizeToCopy); - - /* dictionary management */ - if (dctx->frameInfo.blockMode == LZ4F_blockLinked) - LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 1 /*withinTmp*/); - - dctx->tmpOutStart += sizeToCopy; - dstPtr += sizeToCopy; - } - if (dctx->tmpOutStart == dctx->tmpOutSize) { /* all flushed */ - dctx->dStage = dstage_getBlockHeader; /* get next block */ - break; - } - /* could not flush everything : stop there, just request a block header */ - doAnotherStage = 0; - nextSrcSizeHint = BHSize; - break; - - case dstage_getSuffix: - RETURN_ERROR_IF(dctx->frameRemainingSize, frameSize_wrong); /* incorrect frame size decoded */ - if (!dctx->frameInfo.contentChecksumFlag) { /* no checksum, frame is completed */ - nextSrcSizeHint = 0; - LZ4F_resetDecompressionContext(dctx); - doAnotherStage = 0; - break; - } - if ((srcEnd - srcPtr) < 4) { /* not enough size for entire CRC */ - dctx->tmpInSize = 0; - dctx->dStage = dstage_storeSuffix; - } else { - selectedIn = srcPtr; - srcPtr += 4; - } - - if (dctx->dStage == dstage_storeSuffix) /* can be skipped */ - case dstage_storeSuffix: - { size_t const remainingInput = (size_t)(srcEnd - srcPtr); - size_t const wantedData = 4 - dctx->tmpInSize; - size_t const sizeToCopy = MIN(wantedData, remainingInput); - memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy); - srcPtr += sizeToCopy; - dctx->tmpInSize += sizeToCopy; - if (dctx->tmpInSize < 4) { /* not enough input to read complete suffix */ - nextSrcSizeHint = 4 - dctx->tmpInSize; - doAnotherStage=0; - break; - } - selectedIn = dctx->tmpIn; - } /* if (dctx->dStage == dstage_storeSuffix) */ - - /* case dstage_checkSuffix: */ /* no direct entry, avoid initialization risks */ - if (!dctx->skipChecksum) { - U32 const readCRC = LZ4F_readLE32(selectedIn); - U32 const resultCRC = XXH32_digest(&(dctx->xxh)); -#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - RETURN_ERROR_IF(readCRC != resultCRC, contentChecksum_invalid); -#else - (void)readCRC; - (void)resultCRC; -#endif - } - nextSrcSizeHint = 0; - LZ4F_resetDecompressionContext(dctx); - doAnotherStage = 0; - break; - - case dstage_getSFrameSize: - if ((srcEnd - srcPtr) >= 4) { - selectedIn = srcPtr; - srcPtr += 4; - } else { - /* not enough input to read cBlockSize field */ - dctx->tmpInSize = 4; - dctx->tmpInTarget = 8; - dctx->dStage = dstage_storeSFrameSize; - } - - if (dctx->dStage == dstage_storeSFrameSize) - case dstage_storeSFrameSize: - { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, - (size_t)(srcEnd - srcPtr) ); - memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy); - srcPtr += sizeToCopy; - dctx->tmpInSize += sizeToCopy; - if (dctx->tmpInSize < dctx->tmpInTarget) { - /* not enough input to get full sBlockSize; wait for more */ - nextSrcSizeHint = dctx->tmpInTarget - dctx->tmpInSize; - doAnotherStage = 0; - break; - } - selectedIn = dctx->header + 4; - } /* if (dctx->dStage == dstage_storeSFrameSize) */ - - /* case dstage_decodeSFrameSize: */ /* no direct entry */ - { size_t const SFrameSize = LZ4F_readLE32(selectedIn); - dctx->frameInfo.contentSize = SFrameSize; - dctx->tmpInTarget = SFrameSize; - dctx->dStage = dstage_skipSkippable; - break; - } - - case dstage_skipSkippable: - { 
size_t const skipSize = MIN(dctx->tmpInTarget, (size_t)(srcEnd-srcPtr)); - srcPtr += skipSize; - dctx->tmpInTarget -= skipSize; - doAnotherStage = 0; - nextSrcSizeHint = dctx->tmpInTarget; - if (nextSrcSizeHint) break; /* still more to skip */ - /* frame fully skipped : prepare context for a new frame */ - LZ4F_resetDecompressionContext(dctx); - break; - } - } /* switch (dctx->dStage) */ - } /* while (doAnotherStage) */ - - /* preserve history within tmpOut whenever necessary */ - LZ4F_STATIC_ASSERT((unsigned)dstage_init == 2); - if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked) /* next block will use up to 64KB from previous ones */ - && (dctx->dict != dctx->tmpOutBuffer) /* dictionary is not already within tmp */ - && (dctx->dict != NULL) /* dictionary exists */ - && (!decompressOptionsPtr->stableDst) /* cannot rely on dst data to remain there for next call */ - && ((unsigned)(dctx->dStage)-2 < (unsigned)(dstage_getSuffix)-2) ) /* valid stages : [init ... getSuffix[ */ - { - if (dctx->dStage == dstage_flushOut) { - size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer); - size_t copySize = 64 KB - dctx->tmpOutSize; - const BYTE* oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart; - if (dctx->tmpOutSize > 64 KB) copySize = 0; - if (copySize > preserveSize) copySize = preserveSize; - assert(dctx->tmpOutBuffer != NULL); - - memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize); - - dctx->dict = dctx->tmpOutBuffer; - dctx->dictSize = preserveSize + dctx->tmpOutStart; - } else { - const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize; - size_t const newDictSize = MIN(dctx->dictSize, 64 KB); - - memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize); - - dctx->dict = dctx->tmpOutBuffer; - dctx->dictSize = newDictSize; - dctx->tmpOut = dctx->tmpOutBuffer + newDictSize; - } - } - - *srcSizePtr = (size_t)(srcPtr - srcStart); - *dstSizePtr = (size_t)(dstPtr - dstStart); - return nextSrcSizeHint; -} - -/*! LZ4F_decompress_usingDict() : - * Same as LZ4F_decompress(), using a predefined dictionary. - * Dictionary is used "in place", without any preprocessing. - * It must remain accessible throughout the entire frame decoding. - */ -size_t LZ4F_decompress_usingDict(LZ4F_dctx* dctx, - void* dstBuffer, size_t* dstSizePtr, - const void* srcBuffer, size_t* srcSizePtr, - const void* dict, size_t dictSize, - const LZ4F_decompressOptions_t* decompressOptionsPtr) -{ - if (dctx->dStage <= dstage_init) { - dctx->dict = (const BYTE*)dict; - dctx->dictSize = dictSize; - } - return LZ4F_decompress(dctx, dstBuffer, dstSizePtr, - srcBuffer, srcSizePtr, - decompressOptionsPtr); -} diff --git a/librocksdb-sys/lz4/lib/lz4frame.h b/librocksdb-sys/lz4/lib/lz4frame.h deleted file mode 100644 index 1bdf6c4..0000000 --- a/librocksdb-sys/lz4/lib/lz4frame.h +++ /dev/null @@ -1,692 +0,0 @@ -/* - LZ4F - LZ4-Frame library - Header File - Copyright (C) 2011-2020, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. 
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ - -/* LZ4F is a stand-alone API able to create and decode LZ4 frames - * conformant with specification v1.6.1 in doc/lz4_Frame_format.md . - * Generated frames are compatible with `lz4` CLI. - * - * LZ4F also offers streaming capabilities. - * - * lz4.h is not required when using lz4frame.h, - * except to extract common constants such as LZ4_VERSION_NUMBER. - * */ - -#ifndef LZ4F_H_09782039843 -#define LZ4F_H_09782039843 - -#if defined (__cplusplus) -extern "C" { -#endif - -/* --- Dependency --- */ -#include /* size_t */ - - -/** - * Introduction - * - * lz4frame.h implements LZ4 frame specification: see doc/lz4_Frame_format.md . - * LZ4 Frames are compatible with `lz4` CLI, - * and designed to be interoperable with any system. -**/ - -/*-*************************************************************** - * Compiler specifics - *****************************************************************/ -/* LZ4_DLL_EXPORT : - * Enable exporting of functions when building a Windows DLL - * LZ4FLIB_VISIBILITY : - * Control library symbols visibility. 
- */ -#ifndef LZ4FLIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define LZ4FLIB_VISIBILITY __attribute__ ((visibility ("default"))) -# else -# define LZ4FLIB_VISIBILITY -# endif -#endif -#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1) -# define LZ4FLIB_API __declspec(dllexport) LZ4FLIB_VISIBILITY -#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1) -# define LZ4FLIB_API __declspec(dllimport) LZ4FLIB_VISIBILITY -#else -# define LZ4FLIB_API LZ4FLIB_VISIBILITY -#endif - -#ifdef LZ4F_DISABLE_DEPRECATE_WARNINGS -# define LZ4F_DEPRECATE(x) x -#else -# if defined(_MSC_VER) -# define LZ4F_DEPRECATE(x) x /* __declspec(deprecated) x - only works with C++ */ -# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 6)) -# define LZ4F_DEPRECATE(x) x __attribute__((deprecated)) -# else -# define LZ4F_DEPRECATE(x) x /* no deprecation warning for this compiler */ -# endif -#endif - - -/*-************************************ - * Error management - **************************************/ -typedef size_t LZ4F_errorCode_t; - -LZ4FLIB_API unsigned LZ4F_isError(LZ4F_errorCode_t code); /**< tells when a function result is an error code */ -LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code); /**< return error code string; for debugging */ - - -/*-************************************ - * Frame compression types - ************************************* */ -/* #define LZ4F_ENABLE_OBSOLETE_ENUMS // uncomment to enable obsolete enums */ -#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS -# define LZ4F_OBSOLETE_ENUM(x) , LZ4F_DEPRECATE(x) = LZ4F_##x -#else -# define LZ4F_OBSOLETE_ENUM(x) -#endif - -/* The larger the block size, the (slightly) better the compression ratio, - * though there are diminishing returns. - * Larger blocks also increase memory usage on both compression and decompression sides. - */ -typedef enum { - LZ4F_default=0, - LZ4F_max64KB=4, - LZ4F_max256KB=5, - LZ4F_max1MB=6, - LZ4F_max4MB=7 - LZ4F_OBSOLETE_ENUM(max64KB) - LZ4F_OBSOLETE_ENUM(max256KB) - LZ4F_OBSOLETE_ENUM(max1MB) - LZ4F_OBSOLETE_ENUM(max4MB) -} LZ4F_blockSizeID_t; - -/* Linked blocks sharply reduce inefficiencies when using small blocks, - * they compress better. - * However, some LZ4 decoders are only compatible with independent blocks */ -typedef enum { - LZ4F_blockLinked=0, - LZ4F_blockIndependent - LZ4F_OBSOLETE_ENUM(blockLinked) - LZ4F_OBSOLETE_ENUM(blockIndependent) -} LZ4F_blockMode_t; - -typedef enum { - LZ4F_noContentChecksum=0, - LZ4F_contentChecksumEnabled - LZ4F_OBSOLETE_ENUM(noContentChecksum) - LZ4F_OBSOLETE_ENUM(contentChecksumEnabled) -} LZ4F_contentChecksum_t; - -typedef enum { - LZ4F_noBlockChecksum=0, - LZ4F_blockChecksumEnabled -} LZ4F_blockChecksum_t; - -typedef enum { - LZ4F_frame=0, - LZ4F_skippableFrame - LZ4F_OBSOLETE_ENUM(skippableFrame) -} LZ4F_frameType_t; - -#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS -typedef LZ4F_blockSizeID_t blockSizeID_t; -typedef LZ4F_blockMode_t blockMode_t; -typedef LZ4F_frameType_t frameType_t; -typedef LZ4F_contentChecksum_t contentChecksum_t; -#endif - -/*! LZ4F_frameInfo_t : - * makes it possible to set or read frame parameters. - * Structure must be first init to 0, using memset() or LZ4F_INIT_FRAMEINFO, - * setting all parameters to default. 
- * It's then possible to update selectively some parameters */ -typedef struct { - LZ4F_blockSizeID_t blockSizeID; /* max64KB, max256KB, max1MB, max4MB; 0 == default */ - LZ4F_blockMode_t blockMode; /* LZ4F_blockLinked, LZ4F_blockIndependent; 0 == default */ - LZ4F_contentChecksum_t contentChecksumFlag; /* 1: frame terminated with 32-bit checksum of decompressed data; 0: disabled (default) */ - LZ4F_frameType_t frameType; /* read-only field : LZ4F_frame or LZ4F_skippableFrame */ - unsigned long long contentSize; /* Size of uncompressed content ; 0 == unknown */ - unsigned dictID; /* Dictionary ID, sent by compressor to help decoder select correct dictionary; 0 == no dictID provided */ - LZ4F_blockChecksum_t blockChecksumFlag; /* 1: each block followed by a checksum of block's compressed data; 0: disabled (default) */ -} LZ4F_frameInfo_t; - -#define LZ4F_INIT_FRAMEINFO { LZ4F_default, LZ4F_blockLinked, LZ4F_noContentChecksum, LZ4F_frame, 0ULL, 0U, LZ4F_noBlockChecksum } /* v1.8.3+ */ - -/*! LZ4F_preferences_t : - * makes it possible to supply advanced compression instructions to streaming interface. - * Structure must be first init to 0, using memset() or LZ4F_INIT_PREFERENCES, - * setting all parameters to default. - * All reserved fields must be set to zero. */ -typedef struct { - LZ4F_frameInfo_t frameInfo; - int compressionLevel; /* 0: default (fast mode); values > LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values < 0 trigger "fast acceleration" */ - unsigned autoFlush; /* 1: always flush; reduces usage of internal buffers */ - unsigned favorDecSpeed; /* 1: parser favors decompression speed vs compression ratio. Only works for high compression modes (>= LZ4HC_CLEVEL_OPT_MIN) */ /* v1.8.2+ */ - unsigned reserved[3]; /* must be zero for forward compatibility */ -} LZ4F_preferences_t; - -#define LZ4F_INIT_PREFERENCES { LZ4F_INIT_FRAMEINFO, 0, 0u, 0u, { 0u, 0u, 0u } } /* v1.8.3+ */ - - -/*-********************************* -* Simple compression function -***********************************/ - -LZ4FLIB_API int LZ4F_compressionLevel_max(void); /* v1.8.0+ */ - -/*! LZ4F_compressFrameBound() : - * Returns the maximum possible compressed size with LZ4F_compressFrame() given srcSize and preferences. - * `preferencesPtr` is optional. It can be replaced by NULL, in which case, the function will assume default preferences. - * Note : this result is only usable with LZ4F_compressFrame(). - * It may also be relevant to LZ4F_compressUpdate() _only if_ no flush() operation is ever performed. - */ -LZ4FLIB_API size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr); - -/*! LZ4F_compressFrame() : - * Compress an entire srcBuffer into a valid LZ4 frame. - * dstCapacity MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). - * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default. - * @return : number of bytes written into dstBuffer. 
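A one-shot sketch combining LZ4F_compressFrameBound() and LZ4F_compressFrame(), with preferences initialized from the macro and then adjusted selectively (illustrative only; the chosen blockSizeID and checksum flag are arbitrary):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "lz4frame.h"

/* Illustrative sketch: simplest one-shot compression path. */
int main(void)
{
    const char* text = "one-shot LZ4 frame example";
    size_t const srcSize = strlen(text);
    LZ4F_preferences_t prefs = LZ4F_INIT_PREFERENCES;          /* all defaults */
    prefs.frameInfo.blockSizeID = LZ4F_max256KB;               /* then adjust selectively */
    prefs.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled;

    {   size_t const cap = LZ4F_compressFrameBound(srcSize, &prefs);
        char* const dst = (char*)malloc(cap);
        if (dst != NULL) {
            size_t const n = LZ4F_compressFrame(dst, cap, text, srcSize, &prefs);
            if (LZ4F_isError(n)) printf("error: %s\n", LZ4F_getErrorName(n));
            else                 printf("compressed %u -> %u bytes\n", (unsigned)srcSize, (unsigned)n);
            free(dst);
        }
    }
    return 0;
}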
- * or an error code if it fails (can be tested using LZ4F_isError()) - */ -LZ4FLIB_API size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, - const void* srcBuffer, size_t srcSize, - const LZ4F_preferences_t* preferencesPtr); - - -/*-*********************************** -* Advanced compression functions -*************************************/ -typedef struct LZ4F_cctx_s LZ4F_cctx; /* incomplete type */ -typedef LZ4F_cctx* LZ4F_compressionContext_t; /* for compatibility with older APIs, prefer using LZ4F_cctx */ - -typedef struct { - unsigned stableSrc; /* 1 == src content will remain present on future calls to LZ4F_compress(); skip copying src content within tmp buffer */ - unsigned reserved[3]; -} LZ4F_compressOptions_t; - -/*--- Resource Management ---*/ - -#define LZ4F_VERSION 100 /* This number can be used to check for an incompatible API breaking change */ -LZ4FLIB_API unsigned LZ4F_getVersion(void); - -/*! LZ4F_createCompressionContext() : - * The first thing to do is to create a compressionContext object, - * which will keep track of operation state during streaming compression. - * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version, - * and a pointer to LZ4F_cctx*, to write the resulting pointer into. - * @version provided MUST be LZ4F_VERSION. It is intended to track potential version mismatch, notably when using DLL. - * The function provides a pointer to a fully allocated LZ4F_cctx object. - * @cctxPtr MUST be != NULL. - * If @return != zero, context creation failed. - * A created compression context can be employed multiple times for consecutive streaming operations. - * Once all streaming compression jobs are completed, - * the state object can be released using LZ4F_freeCompressionContext(). - * Note1 : LZ4F_freeCompressionContext() is always successful. Its return value can be ignored. - * Note2 : LZ4F_freeCompressionContext() works fine with NULL input pointers (do nothing). -**/ -LZ4FLIB_API LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** cctxPtr, unsigned version); -LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctx); - - -/*---- Compression ----*/ - -#define LZ4F_HEADER_SIZE_MIN 7 /* LZ4 Frame header size can vary, depending on selected parameters */ -#define LZ4F_HEADER_SIZE_MAX 19 - -/* Size in bytes of a block header in little-endian format. Highest bit indicates if block data is uncompressed */ -#define LZ4F_BLOCK_HEADER_SIZE 4 - -/* Size in bytes of a block checksum footer in little-endian format. */ -#define LZ4F_BLOCK_CHECKSUM_SIZE 4 - -/* Size in bytes of the content checksum. */ -#define LZ4F_CONTENT_CHECKSUM_SIZE 4 - -/*! LZ4F_compressBegin() : - * will write the frame header into dstBuffer. - * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. - * `prefsPtr` is optional : you can provide NULL as argument, all preferences will then be set to default. - * @return : number of bytes written into dstBuffer for the header - * or an error code (which can be tested using LZ4F_isError()) - */ -LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const LZ4F_preferences_t* prefsPtr); - -/*! LZ4F_compressBound() : - * Provides minimum dstCapacity required to guarantee success of - * LZ4F_compressUpdate(), given a srcSize and preferences, for a worst case scenario. - * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() instead. 
- * Note that the result is only valid for a single invocation of LZ4F_compressUpdate(). - * When invoking LZ4F_compressUpdate() multiple times, - * if the output buffer is gradually filled up instead of emptied and re-used from its start, - * one must check if there is enough remaining capacity before each invocation, using LZ4F_compressBound(). - * @return is always the same for a srcSize and prefsPtr. - * prefsPtr is optional : when NULL is provided, preferences will be set to cover worst case scenario. - * tech details : - * @return if automatic flushing is not enabled, includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes. - * It also includes frame footer (ending + checksum), since it might be generated by LZ4F_compressEnd(). - * @return doesn't include frame header, as it was already generated by LZ4F_compressBegin(). - */ -LZ4FLIB_API size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* prefsPtr); - -/*! LZ4F_compressUpdate() : - * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. - * Important rule: dstCapacity MUST be large enough to ensure operation success even in worst case situations. - * This value is provided by LZ4F_compressBound(). - * If this condition is not respected, LZ4F_compress() will fail (result is an errorCode). - * After an error, the state is left in a UB state, and must be re-initialized or freed. - * If previously an uncompressed block was written, buffered data is flushed - * before appending compressed data is continued. - * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default. - * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered). - * or an error code if it fails (which can be tested using LZ4F_isError()) - */ -LZ4FLIB_API size_t LZ4F_compressUpdate(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const void* srcBuffer, size_t srcSize, - const LZ4F_compressOptions_t* cOptPtr); - -/*! LZ4F_flush() : - * When data must be generated and sent immediately, without waiting for a block to be completely filled, - * it's possible to call LZ4_flush(). It will immediately compress any data buffered within cctx. - * `dstCapacity` must be large enough to ensure the operation will be successful. - * `cOptPtr` is optional : it's possible to provide NULL, all options will be set to default. - * @return : nb of bytes written into dstBuffer (can be zero, when there is no data stored within cctx) - * or an error code if it fails (which can be tested using LZ4F_isError()) - * Note : LZ4F_flush() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr). - */ -LZ4FLIB_API size_t LZ4F_flush(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const LZ4F_compressOptions_t* cOptPtr); - -/*! LZ4F_compressEnd() : - * To properly finish an LZ4 frame, invoke LZ4F_compressEnd(). - * It will flush whatever data remained within `cctx` (like LZ4_flush()) - * and properly finalize the frame, with an endMark and a checksum. - * `cOptPtr` is optional : NULL can be provided, in which case all options will be set to default. - * @return : nb of bytes written into dstBuffer, necessarily >= 4 (endMark), - * or an error code if it fails (which can be tested using LZ4F_isError()) - * Note : LZ4F_compressEnd() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr). 
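The note above about re-checking capacity across multiple LZ4F_compressUpdate() calls translates into a pattern like this sketch, where output accumulates in a single buffer (illustrative; returning 0 to signal an undersized dst is a convention of this sketch, not of the library):

#include <string.h>
#include "lz4frame.h"

/* Illustrative sketch: build one frame from several chunks into one growing
 * output buffer, re-checking the remaining room before every call.
 * `chunks` and `chunkSizes` are assumed caller inputs. */
static size_t append_frame(LZ4F_cctx* cctx, const LZ4F_preferences_t* prefs,
                           const void* const* chunks, const size_t* chunkSizes, int nbChunks,
                           char* dst, size_t dstCap)
{
    size_t pos = LZ4F_compressBegin(cctx, dst, dstCap, prefs);
    int i;
    if (LZ4F_isError(pos)) return pos;
    for (i = 0; i < nbChunks; i++) {
        size_t n;
        if (dstCap - pos < LZ4F_compressBound(chunkSizes[i], prefs)) return 0;  /* dst too small */
        n = LZ4F_compressUpdate(cctx, dst + pos, dstCap - pos, chunks[i], chunkSizes[i], NULL);
        if (LZ4F_isError(n)) return n;
        pos += n;
    }
    if (dstCap - pos < LZ4F_compressBound(0, prefs)) return 0;   /* room for the frame footer */
    {   size_t const n = LZ4F_compressEnd(cctx, dst + pos, dstCap - pos, NULL);
        if (LZ4F_isError(n)) return n;
        return pos + n;
    }
}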
- * A successful call to LZ4F_compressEnd() makes `cctx` available again for another compression task. - */ -LZ4FLIB_API size_t LZ4F_compressEnd(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const LZ4F_compressOptions_t* cOptPtr); - - -/*-********************************* -* Decompression functions -***********************************/ -typedef struct LZ4F_dctx_s LZ4F_dctx; /* incomplete type */ -typedef LZ4F_dctx* LZ4F_decompressionContext_t; /* compatibility with previous API versions */ - -typedef struct { - unsigned stableDst; /* pledges that last 64KB decompressed data will remain available unmodified between invocations. - * This optimization skips storage operations in tmp buffers. */ - unsigned skipChecksums; /* disable checksum calculation and verification, even when one is present in frame, to save CPU time. - * Setting this option to 1 once disables all checksums for the rest of the frame. */ - unsigned reserved1; /* must be set to zero for forward compatibility */ - unsigned reserved0; /* idem */ -} LZ4F_decompressOptions_t; - - -/* Resource management */ - -/*! LZ4F_createDecompressionContext() : - * Create an LZ4F_dctx object, to track all decompression operations. - * @version provided MUST be LZ4F_VERSION. - * @dctxPtr MUST be valid. - * The function fills @dctxPtr with the value of a pointer to an allocated and initialized LZ4F_dctx object. - * The @return is an errorCode, which can be tested using LZ4F_isError(). - * dctx memory can be released using LZ4F_freeDecompressionContext(); - * Result of LZ4F_freeDecompressionContext() indicates current state of decompressionContext when being released. - * That is, it should be == 0 if decompression has been completed fully and correctly. - */ -LZ4FLIB_API LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** dctxPtr, unsigned version); -LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx); - - -/*-*********************************** -* Streaming decompression functions -*************************************/ - -#define LZ4F_MAGICNUMBER 0x184D2204U -#define LZ4F_MAGIC_SKIPPABLE_START 0x184D2A50U -#define LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH 5 - -/*! LZ4F_headerSize() : v1.9.0+ - * Provide the header size of a frame starting at `src`. - * `srcSize` must be >= LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH, - * which is enough to decode the header length. - * @return : size of frame header - * or an error code, which can be tested using LZ4F_isError() - * note : Frame header size is variable, but is guaranteed to be - * >= LZ4F_HEADER_SIZE_MIN bytes, and <= LZ4F_HEADER_SIZE_MAX bytes. - */ -LZ4FLIB_API size_t LZ4F_headerSize(const void* src, size_t srcSize); - -/*! LZ4F_getFrameInfo() : - * This function extracts frame parameters (max blockSize, dictID, etc.). - * Its usage is optional: user can also invoke LZ4F_decompress() directly. - * - * Extracted information will fill an existing LZ4F_frameInfo_t structure. - * This can be useful for allocation and dictionary identification purposes. - * - * LZ4F_getFrameInfo() can work in the following situations : - * - * 1) At the beginning of a new frame, before any invocation of LZ4F_decompress(). - * It will decode header from `srcBuffer`, - * consuming the header and starting the decoding process. - * - * Input size must be large enough to contain the full frame header. - * Frame header size can be known beforehand by LZ4F_headerSize(). 
- * Frame header size is variable, but is guaranteed to be >= LZ4F_HEADER_SIZE_MIN bytes, - * and not more than <= LZ4F_HEADER_SIZE_MAX bytes. - * Hence, blindly providing LZ4F_HEADER_SIZE_MAX bytes or more will always work. - * It's allowed to provide more input data than the header size, - * LZ4F_getFrameInfo() will only consume the header. - * - * If input size is not large enough, - * aka if it's smaller than header size, - * function will fail and return an error code. - * - * 2) After decoding has been started, - * it's possible to invoke LZ4F_getFrameInfo() anytime - * to extract already decoded frame parameters stored within dctx. - * - * Note that, if decoding has barely started, - * and not yet read enough information to decode the header, - * LZ4F_getFrameInfo() will fail. - * - * The number of bytes consumed from srcBuffer will be updated in *srcSizePtr (necessarily <= original value). - * LZ4F_getFrameInfo() only consumes bytes when decoding has not yet started, - * and when decoding the header has been successful. - * Decompression must then resume from (srcBuffer + *srcSizePtr). - * - * @return : a hint about how many srcSize bytes LZ4F_decompress() expects for next call, - * or an error code which can be tested using LZ4F_isError(). - * note 1 : in case of error, dctx is not modified. Decoding operation can resume from beginning safely. - * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure. - */ -LZ4FLIB_API size_t -LZ4F_getFrameInfo(LZ4F_dctx* dctx, - LZ4F_frameInfo_t* frameInfoPtr, - const void* srcBuffer, size_t* srcSizePtr); - -/*! LZ4F_decompress() : - * Call this function repetitively to regenerate data compressed in `srcBuffer`. - * - * The function requires a valid dctx state. - * It will read up to *srcSizePtr bytes from srcBuffer, - * and decompress data into dstBuffer, of capacity *dstSizePtr. - * - * The nb of bytes consumed from srcBuffer will be written into *srcSizePtr (necessarily <= original value). - * The nb of bytes decompressed into dstBuffer will be written into *dstSizePtr (necessarily <= original value). - * - * The function does not necessarily read all input bytes, so always check value in *srcSizePtr. - * Unconsumed source data must be presented again in subsequent invocations. - * - * `dstBuffer` can freely change between each consecutive function invocation. - * `dstBuffer` content will be overwritten. - * - * @return : an hint of how many `srcSize` bytes LZ4F_decompress() expects for next call. - * Schematically, it's the size of the current (or remaining) compressed block + header of next block. - * Respecting the hint provides some small speed benefit, because it skips intermediate buffers. - * This is just a hint though, it's always possible to provide any srcSize. - * - * When a frame is fully decoded, @return will be 0 (no more data expected). - * When provided with more bytes than necessary to decode a frame, - * LZ4F_decompress() will stop reading exactly at end of current frame, and @return 0. - * - * If decompression failed, @return is an error code, which can be tested using LZ4F_isError(). - * After a decompression error, the `dctx` context is not resumable. - * Use LZ4F_resetDecompressionContext() to return to clean state. - * - * After a frame is fully decoded, dctx can be used again to decompress another frame. 
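Put together, a frame decoding loop following the rules above can be sketched as below (illustrative only; buffer sizes are arbitrary, and the return value is used only as a hint about how much to read next):

#include <stdio.h>
#include "lz4frame.h"

/* Illustrative sketch: decode one frame from `in` to `out`. Unconsumed input
 * is fed back in on the next pass, and a return value of 0 ends the loop. */
static int decompress_file(FILE* in, FILE* out)
{
    char src[16 * 1024];
    char dst[16 * 1024];
    LZ4F_dctx* dctx = NULL;
    size_t hint = 1;                                  /* non-zero : frame not finished */
    int ok = 1;

    if (LZ4F_isError(LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION))) return 0;

    while (hint != 0) {
        size_t const readSize = fread(src, 1, sizeof(src), in);
        size_t pos = 0;
        if (readSize == 0) { ok = 0; break; }         /* truncated input */
        while (pos < readSize && hint != 0) {
            size_t dstSize = sizeof(dst);
            size_t srcSize = readSize - pos;
            hint = LZ4F_decompress(dctx, dst, &dstSize, src + pos, &srcSize, NULL);
            if (LZ4F_isError(hint)) { ok = 0; break; }
            fwrite(dst, 1, dstSize, out);
            pos += srcSize;                           /* only what was actually consumed */
        }
        if (!ok) break;
    }
    {   LZ4F_errorCode_t const finalStage = LZ4F_freeDecompressionContext(dctx);
        ok = ok && (finalStage == 0);                 /* 0 : frame fully decoded */
    }
    return ok;
}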
- */ -LZ4FLIB_API size_t -LZ4F_decompress(LZ4F_dctx* dctx, - void* dstBuffer, size_t* dstSizePtr, - const void* srcBuffer, size_t* srcSizePtr, - const LZ4F_decompressOptions_t* dOptPtr); - - -/*! LZ4F_resetDecompressionContext() : added in v1.8.0 - * In case of an error, the context is left in "undefined" state. - * In which case, it's necessary to reset it, before re-using it. - * This method can also be used to abruptly stop any unfinished decompression, - * and start a new one using same context resources. */ -LZ4FLIB_API void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx); /* always successful */ - - - -#if defined (__cplusplus) -} -#endif - -#endif /* LZ4F_H_09782039843 */ - -#if defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843) -#define LZ4F_H_STATIC_09782039843 - -#if defined (__cplusplus) -extern "C" { -#endif - -/* These declarations are not stable and may change in the future. - * They are therefore only safe to depend on - * when the caller is statically linked against the library. - * To access their declarations, define LZ4F_STATIC_LINKING_ONLY. - * - * By default, these symbols aren't published into shared/dynamic libraries. - * You can override this behavior and force them to be published - * by defining LZ4F_PUBLISH_STATIC_FUNCTIONS. - * Use at your own risk. - */ -#ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS -# define LZ4FLIB_STATIC_API LZ4FLIB_API -#else -# define LZ4FLIB_STATIC_API -#endif - - -/* --- Error List --- */ -#define LZ4F_LIST_ERRORS(ITEM) \ - ITEM(OK_NoError) \ - ITEM(ERROR_GENERIC) \ - ITEM(ERROR_maxBlockSize_invalid) \ - ITEM(ERROR_blockMode_invalid) \ - ITEM(ERROR_contentChecksumFlag_invalid) \ - ITEM(ERROR_compressionLevel_invalid) \ - ITEM(ERROR_headerVersion_wrong) \ - ITEM(ERROR_blockChecksum_invalid) \ - ITEM(ERROR_reservedFlag_set) \ - ITEM(ERROR_allocation_failed) \ - ITEM(ERROR_srcSize_tooLarge) \ - ITEM(ERROR_dstMaxSize_tooSmall) \ - ITEM(ERROR_frameHeader_incomplete) \ - ITEM(ERROR_frameType_unknown) \ - ITEM(ERROR_frameSize_wrong) \ - ITEM(ERROR_srcPtr_wrong) \ - ITEM(ERROR_decompressionFailed) \ - ITEM(ERROR_headerChecksum_invalid) \ - ITEM(ERROR_contentChecksum_invalid) \ - ITEM(ERROR_frameDecoding_alreadyStarted) \ - ITEM(ERROR_compressionState_uninitialized) \ - ITEM(ERROR_parameter_null) \ - ITEM(ERROR_maxCode) - -#define LZ4F_GENERATE_ENUM(ENUM) LZ4F_##ENUM, - -/* enum list is exposed, to handle specific errors */ -typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM) - _LZ4F_dummy_error_enum_for_c89_never_used } LZ4F_errorCodes; - -LZ4FLIB_STATIC_API LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult); - - -/*! LZ4F_getBlockSize() : - * Return, in scalar format (size_t), - * the maximum block size associated with blockSizeID. -**/ -LZ4FLIB_STATIC_API size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID); - -/*! LZ4F_uncompressedUpdate() : - * LZ4F_uncompressedUpdate() can be called repetitively to add as much data uncompressed data as necessary. - * Important rule: dstCapacity MUST be large enough to store the entire source buffer as - * no compression is done for this operation - * If this condition is not respected, LZ4F_uncompressedUpdate() will fail (result is an errorCode). - * After an error, the state is left in a UB state, and must be re-initialized or freed. - * If previously a compressed block was written, buffered data is flushed - * before appending uncompressed data is continued. 
- * This is only supported when LZ4F_blockIndependent is used - * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default. - * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered). - * or an error code if it fails (which can be tested using LZ4F_isError()) - */ -LZ4FLIB_STATIC_API size_t -LZ4F_uncompressedUpdate(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const void* srcBuffer, size_t srcSize, - const LZ4F_compressOptions_t* cOptPtr); - -/********************************** - * Bulk processing dictionary API - *********************************/ - -/* A Dictionary is useful for the compression of small messages (KB range). - * It dramatically improves compression efficiency. - * - * LZ4 can ingest any input as dictionary, though only the last 64 KB are useful. - * Best results are generally achieved by using Zstandard's Dictionary Builder - * to generate a high-quality dictionary from a set of samples. - * - * Loading a dictionary has a cost, since it involves construction of tables. - * The Bulk processing dictionary API makes it possible to share this cost - * over an arbitrary number of compression jobs, even concurrently, - * markedly improving compression latency for these cases. - * - * The same dictionary will have to be used on the decompression side - * for decoding to be successful. - * To help identify the correct dictionary at decoding stage, - * the frame header allows optional embedding of a dictID field. - */ -typedef struct LZ4F_CDict_s LZ4F_CDict; - -/*! LZ4_createCDict() : - * When compressing multiple messages / blocks using the same dictionary, it's recommended to load it just once. - * LZ4_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay. - * LZ4_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. - * `dictBuffer` can be released after LZ4_CDict creation, since its content is copied within CDict */ -LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize); -LZ4FLIB_STATIC_API void LZ4F_freeCDict(LZ4F_CDict* CDict); - - -/*! LZ4_compressFrame_usingCDict() : - * Compress an entire srcBuffer into a valid LZ4 frame using a digested Dictionary. - * cctx must point to a context created by LZ4F_createCompressionContext(). - * If cdict==NULL, compress without a dictionary. - * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). - * If this condition is not respected, function will fail (@return an errorCode). - * The LZ4F_preferences_t structure is optional : you may provide NULL as argument, - * but it's not recommended, as it's the only way to provide dictID in the frame header. - * @return : number of bytes written into dstBuffer. - * or an error code if it fails (can be tested using LZ4F_isError()) */ -LZ4FLIB_STATIC_API size_t -LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const LZ4F_CDict* cdict, - const LZ4F_preferences_t* preferencesPtr); - - -/*! LZ4F_compressBegin_usingCDict() : - * Inits streaming dictionary compression, and writes the frame header into dstBuffer. - * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. - * `prefsPtr` is optional : you may provide NULL as argument, - * however, it's the only way to provide dictID in the frame header. 
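A minimal sketch of the bulk-processing dictionary API described above: digest one LZ4F_CDict, then reuse it to produce dictionary-compressed frames via LZ4F_compressFrame_usingCDict. It assumes static linking (LZ4F_STATIC_LINKING_ONLY), as these symbols require; the compress_with_dict() wrapper, the dictID value, and the abbreviated error handling are illustrative, and dstCapacity is assumed to satisfy LZ4F_compressFrameBound(srcSize, &prefs).

#define LZ4F_STATIC_LINKING_ONLY   /* LZ4F_CDict lives behind the static-only API */
#include <string.h>
#include "lz4frame.h"

size_t compress_with_dict(void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize,
                          const void* dictBuf, size_t dictSize)
{
    LZ4F_cctx* cctx = NULL;
    LZ4F_CDict* cdict = NULL;
    LZ4F_preferences_t prefs;
    size_t written = 0;

    memset(&prefs, 0, sizeof(prefs));
    prefs.frameInfo.dictID = 1;      /* illustrative dictID, embedded in the frame header */

    if (LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) return 0;
    cdict = LZ4F_createCDict(dictBuf, dictSize);   /* digest once, reuse across many frames */
    if (cdict != NULL) {
        written = LZ4F_compressFrame_usingCDict(cctx, dst, dstCapacity,
                                                src, srcSize, cdict, &prefs);
        LZ4F_freeCDict(cdict);
    }
    LZ4F_freeCompressionContext(cctx);
    return LZ4F_isError(written) ? 0 : written;
}

In a real application the CDict would typically be created once and shared (read-only) across threads, which is the latency saving the comment above describes.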
- * @return : number of bytes written into dstBuffer for the header, - * or an error code (which can be tested using LZ4F_isError()) */ -LZ4FLIB_STATIC_API size_t -LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const LZ4F_CDict* cdict, - const LZ4F_preferences_t* prefsPtr); - - -/*! LZ4F_decompress_usingDict() : - * Same as LZ4F_decompress(), using a predefined dictionary. - * Dictionary is used "in place", without any preprocessing. -** It must remain accessible throughout the entire frame decoding. */ -LZ4FLIB_STATIC_API size_t -LZ4F_decompress_usingDict(LZ4F_dctx* dctxPtr, - void* dstBuffer, size_t* dstSizePtr, - const void* srcBuffer, size_t* srcSizePtr, - const void* dict, size_t dictSize, - const LZ4F_decompressOptions_t* decompressOptionsPtr); - - -/*! Custom memory allocation : - * These prototypes make it possible to pass custom allocation/free functions. - * LZ4F_customMem is provided at state creation time, using LZ4F_create*_advanced() listed below. - * All allocation/free operations will be completed using these custom variants instead of regular ones. - */ -typedef void* (*LZ4F_AllocFunction) (void* opaqueState, size_t size); -typedef void* (*LZ4F_CallocFunction) (void* opaqueState, size_t size); -typedef void (*LZ4F_FreeFunction) (void* opaqueState, void* address); -typedef struct { - LZ4F_AllocFunction customAlloc; - LZ4F_CallocFunction customCalloc; /* optional; when not defined, uses customAlloc + memset */ - LZ4F_FreeFunction customFree; - void* opaqueState; -} LZ4F_CustomMem; -static -#ifdef __GNUC__ -__attribute__((__unused__)) -#endif -LZ4F_CustomMem const LZ4F_defaultCMem = { NULL, NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */ - -LZ4FLIB_STATIC_API LZ4F_cctx* LZ4F_createCompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version); -LZ4FLIB_STATIC_API LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version); -LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict_advanced(LZ4F_CustomMem customMem, const void* dictBuffer, size_t dictSize); - - -#if defined (__cplusplus) -} -#endif - -#endif /* defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843) */ diff --git a/librocksdb-sys/lz4/lib/lz4frame_static.h b/librocksdb-sys/lz4/lib/lz4frame_static.h deleted file mode 100644 index 2b44a63..0000000 --- a/librocksdb-sys/lz4/lib/lz4frame_static.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - LZ4 auto-framing library - Header File for static linking only - Copyright (C) 2011-2020, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ - -#ifndef LZ4FRAME_STATIC_H_0398209384 -#define LZ4FRAME_STATIC_H_0398209384 - -/* The declarations that formerly were made here have been merged into - * lz4frame.h, protected by the LZ4F_STATIC_LINKING_ONLY macro. Going forward, - * it is recommended to simply include that header directly. - */ - -#define LZ4F_STATIC_LINKING_ONLY -#include "lz4frame.h" - -#endif /* LZ4FRAME_STATIC_H_0398209384 */ diff --git a/librocksdb-sys/lz4/lib/lz4hc.c b/librocksdb-sys/lz4/lib/lz4hc.c deleted file mode 100644 index b21ad6b..0000000 --- a/librocksdb-sys/lz4/lib/lz4hc.c +++ /dev/null @@ -1,1631 +0,0 @@ -/* - LZ4 HC - High Compression Mode of LZ4 - Copyright (C) 2011-2020, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ -/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */ - - -/* ************************************* -* Tuning Parameter -***************************************/ - -/*! HEAPMODE : - * Select how default compression function will allocate workplace memory, - * in stack (0:fastest), or in heap (1:requires malloc()). - * Since workplace is rather large, heap mode is recommended. 
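The LZ4HC_HEAPMODE comment above governs how the one-shot entry point obtains its large LZ4_streamHC_t workspace. A minimal sketch of calling that entry point, LZ4_compress_HC(); the hc_compress_example() wrapper and its buffer handling are illustrative, not taken from the sources.

#include <stdlib.h>
#include "lz4.h"
#include "lz4hc.h"

int hc_compress_example(const char* src, int srcSize, int level)
{
    int const bound = LZ4_compressBound(srcSize);   /* worst-case compressed size */
    char* const dst = (char*)malloc((size_t)bound);
    int cSize;
    if (dst == NULL) return -1;
    /* With LZ4HC_HEAPMODE==1 (the default selected above), the large LZ4_streamHC_t
     * workspace is allocated on the heap inside LZ4_compress_HC(); with 0 it would
     * live on this call's stack instead. */
    cSize = LZ4_compress_HC(src, dst, srcSize, bound, level);
    /* ... use dst[0..cSize) ... a return value of 0 means compression failed */
    free(dst);
    return cSize;
}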
-**/ -#ifndef LZ4HC_HEAPMODE -# define LZ4HC_HEAPMODE 1 -#endif - - -/*=== Dependency ===*/ -#define LZ4_HC_STATIC_LINKING_ONLY -#include "lz4hc.h" - - -/*=== Common definitions ===*/ -#if defined(__GNUC__) -# pragma GCC diagnostic ignored "-Wunused-function" -#endif -#if defined (__clang__) -# pragma clang diagnostic ignored "-Wunused-function" -#endif - -#define LZ4_COMMONDEFS_ONLY -#ifndef LZ4_SRC_INCLUDED -#include "lz4.c" /* LZ4_count, constants, mem */ -#endif - - -/*=== Enums ===*/ -typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive; - - -/*=== Constants ===*/ -#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH) -#define LZ4_OPT_NUM (1<<12) - - -/*=== Macros ===*/ -#define MIN(a,b) ( (a) < (b) ? (a) : (b) ) -#define MAX(a,b) ( (a) > (b) ? (a) : (b) ) -#define HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG)) -#define DELTANEXTMAXD(p) chainTable[(p) & LZ4HC_MAXD_MASK] /* flexible, LZ4HC_MAXD dependent */ -#define DELTANEXTU16(table, pos) table[(U16)(pos)] /* faster */ -/* Make fields passed to, and updated by LZ4HC_encodeSequence explicit */ -#define UPDATABLE(ip, op, anchor) &ip, &op, &anchor - -static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); } - - -/************************************** -* HC Compression -**************************************/ -static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4) -{ - MEM_INIT(hc4->hashTable, 0, sizeof(hc4->hashTable)); - MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable)); -} - -static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start) -{ - size_t const bufferSize = (size_t)(hc4->end - hc4->prefixStart); - size_t newStartingOffset = bufferSize + hc4->dictLimit; - assert(newStartingOffset >= bufferSize); /* check overflow */ - if (newStartingOffset > 1 GB) { - LZ4HC_clearTables(hc4); - newStartingOffset = 0; - } - newStartingOffset += 64 KB; - hc4->nextToUpdate = (U32)newStartingOffset; - hc4->prefixStart = start; - hc4->end = start; - hc4->dictStart = start; - hc4->dictLimit = (U32)newStartingOffset; - hc4->lowLimit = (U32)newStartingOffset; -} - - -/* Update chains up to ip (excluded) */ -LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip) -{ - U16* const chainTable = hc4->chainTable; - U32* const hashTable = hc4->hashTable; - const BYTE* const prefixPtr = hc4->prefixStart; - U32 const prefixIdx = hc4->dictLimit; - U32 const target = (U32)(ip - prefixPtr) + prefixIdx; - U32 idx = hc4->nextToUpdate; - assert(ip >= prefixPtr); - assert(target >= prefixIdx); - - while (idx < target) { - U32 const h = LZ4HC_hashPtr(prefixPtr+idx-prefixIdx); - size_t delta = idx - hashTable[h]; - if (delta>LZ4_DISTANCE_MAX) delta = LZ4_DISTANCE_MAX; - DELTANEXTU16(chainTable, idx) = (U16)delta; - hashTable[h] = idx; - idx++; - } - - hc4->nextToUpdate = target; -} - -/** LZ4HC_countBack() : - * @return : negative value, nb of common bytes before ip/match */ -LZ4_FORCE_INLINE -int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match, - const BYTE* const iMin, const BYTE* const mMin) -{ - int back = 0; - int const min = (int)MAX(iMin - ip, mMin - match); - assert(min <= 0); - assert(ip >= iMin); assert((size_t)(ip-iMin) < (1U<<31)); - assert(match >= mMin); assert((size_t)(match - mMin) < (1U<<31)); - while ( (back > min) - && (ip[back-1] == match[back-1]) ) - back--; - return back; -} - -#if defined(_MSC_VER) -# define LZ4HC_rotl32(x,r) _rotl(x,r) -#else -# define LZ4HC_rotl32(x,r) ((x << r) | (x >> (32 - r))) -#endif - - -static U32 
LZ4HC_rotatePattern(size_t const rotate, U32 const pattern) -{ - size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3; - if (bitsToRotate == 0) return pattern; - return LZ4HC_rotl32(pattern, (int)bitsToRotate); -} - -/* LZ4HC_countPattern() : - * pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) */ -static unsigned -LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32) -{ - const BYTE* const iStart = ip; - reg_t const pattern = (sizeof(pattern)==8) ? - (reg_t)pattern32 + (((reg_t)pattern32) << (sizeof(pattern)*4)) : pattern32; - - while (likely(ip < iEnd-(sizeof(pattern)-1))) { - reg_t const diff = LZ4_read_ARCH(ip) ^ pattern; - if (!diff) { ip+=sizeof(pattern); continue; } - ip += LZ4_NbCommonBytes(diff); - return (unsigned)(ip - iStart); - } - - if (LZ4_isLittleEndian()) { - reg_t patternByte = pattern; - while ((ip<iEnd) && (*ip == (BYTE)patternByte)) { - ip++; patternByte >>= 8; - } - } else { /* big endian */ - U32 bitOffset = (sizeof(pattern)*8) - 8; - while (ip < iEnd) { - BYTE const byte = (BYTE)(pattern >> bitOffset); - if (*ip != byte) break; - ip ++; bitOffset -= 8; - } } - - return (unsigned)(ip - iStart); -} - -/* LZ4HC_reverseCountPattern() : - * pattern must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) - * read using natural platform endianness */ -static unsigned -LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern) -{ - const BYTE* const iStart = ip; - - while (likely(ip >= iLow+4)) { - if (LZ4_read32(ip-4) != pattern) break; - ip -= 4; - } - { const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianness */ - while (likely(ip>iLow)) { - if (ip[-1] != *bytePtr) break; - ip--; bytePtr--; - } } - return (unsigned)(iStart - ip); -} - -/* LZ4HC_protectDictEnd() : - * Checks if the match is in the last 3 bytes of the dictionary, so reading the - * 4 byte MINMATCH would overflow. - * @returns true if the match index is okay. - */ -static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex) -{ - return ((U32)((dictLimit - 1) - matchIndex) >= 3); -} - -typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e; -typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e; - -LZ4_FORCE_INLINE int -LZ4HC_InsertAndGetWiderMatch ( - LZ4HC_CCtx_internal* const hc4, - const BYTE* const ip, - const BYTE* const iLowLimit, const BYTE* const iHighLimit, - int longest, - const BYTE** matchpos, - const BYTE** startpos, - const int maxNbAttempts, - const int patternAnalysis, const int chainSwap, - const dictCtx_directive dict, - const HCfavor_e favorDecSpeed) -{ - U16* const chainTable = hc4->chainTable; - U32* const HashTable = hc4->hashTable; - const LZ4HC_CCtx_internal * const dictCtx = hc4->dictCtx; - const BYTE* const prefixPtr = hc4->prefixStart; - const U32 prefixIdx = hc4->dictLimit; - const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx; - const int withinStartDistance = (hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex); - const U32 lowestMatchIndex = (withinStartDistance) ?
hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX; - const BYTE* const dictStart = hc4->dictStart; - const U32 dictIdx = hc4->lowLimit; - const BYTE* const dictEnd = dictStart + prefixIdx - dictIdx; - int const lookBackLength = (int)(ip-iLowLimit); - int nbAttempts = maxNbAttempts; - U32 matchChainPos = 0; - U32 const pattern = LZ4_read32(ip); - U32 matchIndex; - repeat_state_e repeat = rep_untested; - size_t srcPatternLength = 0; - - DEBUGLOG(7, "LZ4HC_InsertAndGetWiderMatch"); - /* First Match */ - LZ4HC_Insert(hc4, ip); - matchIndex = HashTable[LZ4HC_hashPtr(ip)]; - DEBUGLOG(7, "First match at index %u / %u (lowestMatchIndex)", - matchIndex, lowestMatchIndex); - - while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) { - int matchLength=0; - nbAttempts--; - assert(matchIndex < ipIndex); - if (favorDecSpeed && (ipIndex - matchIndex < 8)) { - /* do nothing */ - } else if (matchIndex >= prefixIdx) { /* within current Prefix */ - const BYTE* const matchPtr = prefixPtr + matchIndex - prefixIdx; - assert(matchPtr < ip); - assert(longest >= 1); - if (LZ4_read16(iLowLimit + longest - 1) == LZ4_read16(matchPtr - lookBackLength + longest - 1)) { - if (LZ4_read32(matchPtr) == pattern) { - int const back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, prefixPtr) : 0; - matchLength = MINMATCH + (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit); - matchLength -= back; - if (matchLength > longest) { - longest = matchLength; - *matchpos = matchPtr + back; - *startpos = ip + back; - } } } - } else { /* lowestMatchIndex <= matchIndex < dictLimit */ - const BYTE* const matchPtr = dictStart + (matchIndex - dictIdx); - assert(matchIndex >= dictIdx); - if ( likely(matchIndex <= prefixIdx - 4) - && (LZ4_read32(matchPtr) == pattern) ) { - int back = 0; - const BYTE* vLimit = ip + (prefixIdx - matchIndex); - if (vLimit > iHighLimit) vLimit = iHighLimit; - matchLength = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH; - if ((ip+matchLength == vLimit) && (vLimit < iHighLimit)) - matchLength += LZ4_count(ip+matchLength, prefixPtr, iHighLimit); - back = lookBackLength ? 
LZ4HC_countBack(ip, matchPtr, iLowLimit, dictStart) : 0; - matchLength -= back; - if (matchLength > longest) { - longest = matchLength; - *matchpos = prefixPtr - prefixIdx + matchIndex + back; /* virtual pos, relative to ip, to retrieve offset */ - *startpos = ip + back; - } } } - - if (chainSwap && matchLength==longest) { /* better match => select a better chain */ - assert(lookBackLength==0); /* search forward only */ - if (matchIndex + (U32)longest <= ipIndex) { - int const kTrigger = 4; - U32 distanceToNextMatch = 1; - int const end = longest - MINMATCH + 1; - int step = 1; - int accel = 1 << kTrigger; - int pos; - for (pos = 0; pos < end; pos += step) { - U32 const candidateDist = DELTANEXTU16(chainTable, matchIndex + (U32)pos); - step = (accel++ >> kTrigger); - if (candidateDist > distanceToNextMatch) { - distanceToNextMatch = candidateDist; - matchChainPos = (U32)pos; - accel = 1 << kTrigger; - } } - if (distanceToNextMatch > 1) { - if (distanceToNextMatch > matchIndex) break; /* avoid overflow */ - matchIndex -= distanceToNextMatch; - continue; - } } } - - { U32 const distNextMatch = DELTANEXTU16(chainTable, matchIndex); - if (patternAnalysis && distNextMatch==1 && matchChainPos==0) { - U32 const matchCandidateIdx = matchIndex-1; - /* may be a repeated pattern */ - if (repeat == rep_untested) { - if ( ((pattern & 0xFFFF) == (pattern >> 16)) - & ((pattern & 0xFF) == (pattern >> 24)) ) { - repeat = rep_confirmed; - srcPatternLength = LZ4HC_countPattern(ip+sizeof(pattern), iHighLimit, pattern) + sizeof(pattern); - } else { - repeat = rep_not; - } } - if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex) - && LZ4HC_protectDictEnd(prefixIdx, matchCandidateIdx) ) { - const int extDict = matchCandidateIdx < prefixIdx; - const BYTE* const matchPtr = (extDict ? dictStart - dictIdx : prefixPtr - prefixIdx) + matchCandidateIdx; - if (LZ4_read32(matchPtr) == pattern) { /* good candidate */ - const BYTE* const iLimit = extDict ? dictEnd : iHighLimit; - size_t forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern); - if (extDict && matchPtr + forwardPatternLength == iLimit) { - U32 const rotatedPattern = LZ4HC_rotatePattern(forwardPatternLength, pattern); - forwardPatternLength += LZ4HC_countPattern(prefixPtr, iHighLimit, rotatedPattern); - } - { const BYTE* const lowestMatchPtr = extDict ? 
dictStart : prefixPtr; - size_t backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern); - size_t currentSegmentLength; - if (!extDict - && matchPtr - backLength == prefixPtr - && dictIdx < prefixIdx) { - U32 const rotatedPattern = LZ4HC_rotatePattern((U32)(-(int)backLength), pattern); - backLength += LZ4HC_reverseCountPattern(dictEnd, dictStart, rotatedPattern); - } - /* Limit backLength not go further than lowestMatchIndex */ - backLength = matchCandidateIdx - MAX(matchCandidateIdx - (U32)backLength, lowestMatchIndex); - assert(matchCandidateIdx - backLength >= lowestMatchIndex); - currentSegmentLength = backLength + forwardPatternLength; - /* Adjust to end of pattern if the source pattern fits, otherwise the beginning of the pattern */ - if ( (currentSegmentLength >= srcPatternLength) /* current pattern segment large enough to contain full srcPatternLength */ - && (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */ - U32 const newMatchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength; /* best position, full pattern, might be followed by more match */ - if (LZ4HC_protectDictEnd(prefixIdx, newMatchIndex)) - matchIndex = newMatchIndex; - else { - /* Can only happen if started in the prefix */ - assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict); - matchIndex = prefixIdx; - } - } else { - U32 const newMatchIndex = matchCandidateIdx - (U32)backLength; /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */ - if (!LZ4HC_protectDictEnd(prefixIdx, newMatchIndex)) { - assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict); - matchIndex = prefixIdx; - } else { - matchIndex = newMatchIndex; - if (lookBackLength==0) { /* no back possible */ - size_t const maxML = MIN(currentSegmentLength, srcPatternLength); - if ((size_t)longest < maxML) { - assert(prefixPtr - prefixIdx + matchIndex != ip); - if ((size_t)(ip - prefixPtr) + prefixIdx - matchIndex > LZ4_DISTANCE_MAX) break; - assert(maxML < 2 GB); - longest = (int)maxML; - *matchpos = prefixPtr - prefixIdx + matchIndex; /* virtual pos, relative to ip, to retrieve offset */ - *startpos = ip; - } - { U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex); - if (distToNextPattern > matchIndex) break; /* avoid overflow */ - matchIndex -= distToNextPattern; - } } } } } - continue; - } } - } } /* PA optimization */ - - /* follow current chain */ - matchIndex -= DELTANEXTU16(chainTable, matchIndex + matchChainPos); - - } /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) */ - - if ( dict == usingDictCtxHc - && nbAttempts > 0 - && ipIndex - lowestMatchIndex < LZ4_DISTANCE_MAX) { - size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit; - U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)]; - assert(dictEndOffset <= 1 GB); - matchIndex = dictMatchIndex + lowestMatchIndex - (U32)dictEndOffset; - while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) { - const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + dictMatchIndex; - - if (LZ4_read32(matchPtr) == pattern) { - int mlt; - int back = 0; - const BYTE* vLimit = ip + (dictEndOffset - dictMatchIndex); - if (vLimit > iHighLimit) vLimit = iHighLimit; - mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH; - back = lookBackLength ? 
LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->prefixStart) : 0; - mlt -= back; - if (mlt > longest) { - longest = mlt; - *matchpos = prefixPtr - prefixIdx + matchIndex + back; - *startpos = ip + back; - } } - - { U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, dictMatchIndex); - dictMatchIndex -= nextOffset; - matchIndex -= nextOffset; - } } } - - return longest; -} - -LZ4_FORCE_INLINE int -LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index table will be updated */ - const BYTE* const ip, const BYTE* const iLimit, - const BYTE** matchpos, - const int maxNbAttempts, - const int patternAnalysis, - const dictCtx_directive dict) -{ - const BYTE* uselessPtr = ip; - /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos), - * but this won't be the case here, as we define iLowLimit==ip, - * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */ - return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, matchpos, &uselessPtr, maxNbAttempts, patternAnalysis, 0 /*chainSwap*/, dict, favorCompressionRatio); -} - -/* LZ4HC_encodeSequence() : - * @return : 0 if ok, - * 1 if buffer issue detected */ -LZ4_FORCE_INLINE int LZ4HC_encodeSequence ( - const BYTE** _ip, - BYTE** _op, - const BYTE** _anchor, - int matchLength, - const BYTE* const match, - limitedOutput_directive limit, - BYTE* oend) -{ -#define ip (*_ip) -#define op (*_op) -#define anchor (*_anchor) - - size_t length; - BYTE* const token = op++; - -#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6) - static const BYTE* start = NULL; - static U32 totalCost = 0; - U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start); - U32 const ll = (U32)(ip - anchor); - U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0; - U32 const mlAdd = (matchLength>=19) ? 
((matchLength-19) / 255) + 1 : 0; - U32 const cost = 1 + llAdd + ll + 2 + mlAdd; - if (start==NULL) start = anchor; /* only works for single segment */ - /* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */ - DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5u, cost:%4u + %5u", - pos, - (U32)(ip - anchor), matchLength, (U32)(ip-match), - cost, totalCost); - totalCost += cost; -#endif - - /* Encode Literal length */ - length = (size_t)(ip - anchor); - LZ4_STATIC_ASSERT(notLimited == 0); - /* Check output limit */ - if (limit && ((op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) { - DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)", - (int)length, (int)(oend - op)); - return 1; - } - if (length >= RUN_MASK) { - size_t len = length - RUN_MASK; - *token = (RUN_MASK << ML_BITS); - for(; len >= 255 ; len -= 255) *op++ = 255; - *op++ = (BYTE)len; - } else { - *token = (BYTE)(length << ML_BITS); - } - - /* Copy Literals */ - LZ4_wildCopy8(op, anchor, op + length); - op += length; - - /* Encode Offset */ - assert( (ip - match) <= LZ4_DISTANCE_MAX ); /* note : consider providing offset as a value, rather than as a pointer difference */ - LZ4_writeLE16(op, (U16)(ip - match)); op += 2; - - /* Encode MatchLength */ - assert(matchLength >= MINMATCH); - length = (size_t)matchLength - MINMATCH; - if (limit && (op + (length / 255) + (1 + LASTLITERALS) > oend)) { - DEBUGLOG(6, "Not enough room to write match length"); - return 1; /* Check output limit */ - } - if (length >= ML_MASK) { - *token += ML_MASK; - length -= ML_MASK; - for(; length >= 510 ; length -= 510) { *op++ = 255; *op++ = 255; } - if (length >= 255) { length -= 255; *op++ = 255; } - *op++ = (BYTE)length; - } else { - *token += (BYTE)(length); - } - - /* Prepare next loop */ - ip += matchLength; - anchor = ip; - - return 0; -} -#undef ip -#undef op -#undef anchor - -LZ4_FORCE_INLINE int LZ4HC_compress_hashChain ( - LZ4HC_CCtx_internal* const ctx, - const char* const source, - char* const dest, - int* srcSizePtr, - int const maxOutputSize, - int maxNbAttempts, - const limitedOutput_directive limit, - const dictCtx_directive dict - ) -{ - const int inputSize = *srcSizePtr; - const int patternAnalysis = (maxNbAttempts > 128); /* levels 9+ */ - - const BYTE* ip = (const BYTE*) source; - const BYTE* anchor = ip; - const BYTE* const iend = ip + inputSize; - const BYTE* const mflimit = iend - MFLIMIT; - const BYTE* const matchlimit = (iend - LASTLITERALS); - - BYTE* optr = (BYTE*) dest; - BYTE* op = (BYTE*) dest; - BYTE* oend = op + maxOutputSize; - - int ml0, ml, ml2, ml3; - const BYTE* start0; - const BYTE* ref0; - const BYTE* ref = NULL; - const BYTE* start2 = NULL; - const BYTE* ref2 = NULL; - const BYTE* start3 = NULL; - const BYTE* ref3 = NULL; - - /* init */ - *srcSizePtr = 0; - if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */ - if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */ - - /* Main Loop */ - while (ip <= mflimit) { - ml = LZ4HC_InsertAndFindBestMatch(ctx, ip, matchlimit, &ref, maxNbAttempts, patternAnalysis, dict); - if (ml encode ML1 */ - optr = op; - if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow; - continue; - } - - if (start0 < ip) { /* first match was skipped at least once */ - if (start2 < ip + ml0) { /* squeezing ML1 between ML0(original ML1) and ML2 */ - ip = start0; ref = ref0; ml = ml0; /* restore initial ML1 */ - } } - - /* Here, 
start0==ip */ - if ((start2 - ip) < 3) { /* First Match too small : removed */ - ml = ml2; - ip = start2; - ref =ref2; - goto _Search2; - } - -_Search3: - /* At this stage, we have : - * ml2 > ml1, and - * ip1+3 <= ip2 (usually < ip1+ml1) */ - if ((start2 - ip) < OPTIMAL_ML) { - int correction; - int new_ml = ml; - if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML; - if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH; - correction = new_ml - (int)(start2 - ip); - if (correction > 0) { - start2 += correction; - ref2 += correction; - ml2 -= correction; - } - } - /* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */ - - if (start2 + ml2 <= mflimit) { - ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, - start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3, - maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio); - } else { - ml3 = ml2; - } - - if (ml3 == ml2) { /* No better match => encode ML1 and ML2 */ - /* ip & ref are known; Now for ml */ - if (start2 < ip+ml) ml = (int)(start2 - ip); - /* Now, encode 2 sequences */ - optr = op; - if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow; - ip = start2; - optr = op; - if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) { - ml = ml2; - ref = ref2; - goto _dest_overflow; - } - continue; - } - - if (start3 < ip+ml+3) { /* Not enough space for match 2 : remove it */ - if (start3 >= (ip+ml)) { /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */ - if (start2 < ip+ml) { - int correction = (int)(ip+ml - start2); - start2 += correction; - ref2 += correction; - ml2 -= correction; - if (ml2 < MINMATCH) { - start2 = start3; - ref2 = ref3; - ml2 = ml3; - } - } - - optr = op; - if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow; - ip = start3; - ref = ref3; - ml = ml3; - - start0 = start2; - ref0 = ref2; - ml0 = ml2; - goto _Search2; - } - - start2 = start3; - ref2 = ref3; - ml2 = ml3; - goto _Search3; - } - - /* - * OK, now we have 3 ascending matches; - * let's write the first one ML1. - * ip & ref are known; Now decide ml. 
- */ - if (start2 < ip+ml) { - if ((start2 - ip) < OPTIMAL_ML) { - int correction; - if (ml > OPTIMAL_ML) ml = OPTIMAL_ML; - if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH; - correction = ml - (int)(start2 - ip); - if (correction > 0) { - start2 += correction; - ref2 += correction; - ml2 -= correction; - } - } else { - ml = (int)(start2 - ip); - } - } - optr = op; - if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow; - - /* ML2 becomes ML1 */ - ip = start2; ref = ref2; ml = ml2; - - /* ML3 becomes ML2 */ - start2 = start3; ref2 = ref3; ml2 = ml3; - - /* let's find a new ML3 */ - goto _Search3; - } - -_last_literals: - /* Encode Last Literals */ - { size_t lastRunSize = (size_t)(iend - anchor); /* literals */ - size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255; - size_t const totalSize = 1 + llAdd + lastRunSize; - if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */ - if (limit && (op + totalSize > oend)) { - if (limit == limitedOutput) return 0; - /* adapt lastRunSize to fill 'dest' */ - lastRunSize = (size_t)(oend - op) - 1 /*token*/; - llAdd = (lastRunSize + 256 - RUN_MASK) / 256; - lastRunSize -= llAdd; - } - DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize); - ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */ - - if (lastRunSize >= RUN_MASK) { - size_t accumulator = lastRunSize - RUN_MASK; - *op++ = (RUN_MASK << ML_BITS); - for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255; - *op++ = (BYTE) accumulator; - } else { - *op++ = (BYTE)(lastRunSize << ML_BITS); - } - LZ4_memcpy(op, anchor, lastRunSize); - op += lastRunSize; - } - - /* End */ - *srcSizePtr = (int) (((const char*)ip) - source); - return (int) (((char*)op)-dest); - -_dest_overflow: - if (limit == fillOutput) { - /* Assumption : ip, anchor, ml and ref must be set correctly */ - size_t const ll = (size_t)(ip - anchor); - size_t const ll_addbytes = (ll + 240) / 255; - size_t const ll_totalCost = 1 + ll_addbytes + ll; - BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */ - DEBUGLOG(6, "Last sequence overflowing"); - op = optr; /* restore correct out pointer */ - if (op + ll_totalCost <= maxLitPos) { - /* ll validated; now adjust match length */ - size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost)); - size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255); - assert(maxMlSize < INT_MAX); assert(ml >= 0); - if ((size_t)ml > maxMlSize) ml = (int)maxMlSize; - if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ml >= MFLIMIT) { - LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, notLimited, oend); - } } - goto _last_literals; - } - /* compression failed */ - return 0; -} - - -static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx, - const char* const source, char* dst, - int* srcSizePtr, int dstCapacity, - int const nbSearches, size_t sufficient_len, - const limitedOutput_directive limit, int const fullUpdate, - const dictCtx_directive dict, - const HCfavor_e favorDecSpeed); - - -LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal ( - LZ4HC_CCtx_internal* const ctx, - const char* const src, - char* const dst, - int* const srcSizePtr, - int const dstCapacity, - int cLevel, - const limitedOutput_directive limit, - const dictCtx_directive dict - ) -{ - typedef enum { lz4hc, lz4opt } lz4hc_strat_e; - typedef struct { - lz4hc_strat_e strat; - int nbSearches; - U32 targetLength; - } cParams_t; - static const cParams_t 
clTable[LZ4HC_CLEVEL_MAX+1] = { - { lz4hc, 2, 16 }, /* 0, unused */ - { lz4hc, 2, 16 }, /* 1, unused */ - { lz4hc, 2, 16 }, /* 2, unused */ - { lz4hc, 4, 16 }, /* 3 */ - { lz4hc, 8, 16 }, /* 4 */ - { lz4hc, 16, 16 }, /* 5 */ - { lz4hc, 32, 16 }, /* 6 */ - { lz4hc, 64, 16 }, /* 7 */ - { lz4hc, 128, 16 }, /* 8 */ - { lz4hc, 256, 16 }, /* 9 */ - { lz4opt, 96, 64 }, /*10==LZ4HC_CLEVEL_OPT_MIN*/ - { lz4opt, 512,128 }, /*11 */ - { lz4opt,16384,LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */ - }; - - DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)", - ctx, src, *srcSizePtr, limit); - - if (limit == fillOutput && dstCapacity < 1) return 0; /* Impossible to store anything */ - if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */ - - ctx->end += *srcSizePtr; - if (cLevel < 1) cLevel = LZ4HC_CLEVEL_DEFAULT; /* note : convention is different from lz4frame, maybe something to review */ - cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel); - { cParams_t const cParam = clTable[cLevel]; - HCfavor_e const favor = ctx->favorDecSpeed ? favorDecompressionSpeed : favorCompressionRatio; - int result; - - if (cParam.strat == lz4hc) { - result = LZ4HC_compress_hashChain(ctx, - src, dst, srcSizePtr, dstCapacity, - cParam.nbSearches, limit, dict); - } else { - assert(cParam.strat == lz4opt); - result = LZ4HC_compress_optimal(ctx, - src, dst, srcSizePtr, dstCapacity, - cParam.nbSearches, cParam.targetLength, limit, - cLevel == LZ4HC_CLEVEL_MAX, /* ultra mode */ - dict, favor); - } - if (result <= 0) ctx->dirty = 1; - return result; - } -} - -static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock); - -static int -LZ4HC_compress_generic_noDictCtx ( - LZ4HC_CCtx_internal* const ctx, - const char* const src, - char* const dst, - int* const srcSizePtr, - int const dstCapacity, - int cLevel, - limitedOutput_directive limit - ) -{ - assert(ctx->dictCtx == NULL); - return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, noDictCtx); -} - -static int -LZ4HC_compress_generic_dictCtx ( - LZ4HC_CCtx_internal* const ctx, - const char* const src, - char* const dst, - int* const srcSizePtr, - int const dstCapacity, - int cLevel, - limitedOutput_directive limit - ) -{ - const size_t position = (size_t)(ctx->end - ctx->prefixStart) + (ctx->dictLimit - ctx->lowLimit); - assert(ctx->dictCtx != NULL); - if (position >= 64 KB) { - ctx->dictCtx = NULL; - return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit); - } else if (position == 0 && *srcSizePtr > 4 KB) { - LZ4_memcpy(ctx, ctx->dictCtx, sizeof(LZ4HC_CCtx_internal)); - LZ4HC_setExternalDict(ctx, (const BYTE *)src); - ctx->compressionLevel = (short)cLevel; - return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit); - } else { - return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, usingDictCtxHc); - } -} - -static int -LZ4HC_compress_generic ( - LZ4HC_CCtx_internal* const ctx, - const char* const src, - char* const dst, - int* const srcSizePtr, - int const dstCapacity, - int cLevel, - limitedOutput_directive limit - ) -{ - if (ctx->dictCtx == NULL) { - return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit); - } else { - return LZ4HC_compress_generic_dictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit); - } -} - - -int LZ4_sizeofStateHC(void) { return (int)sizeof(LZ4_streamHC_t); } - -static 
size_t LZ4_streamHC_t_alignment(void) -{ -#if LZ4_ALIGN_TEST - typedef struct { char c; LZ4_streamHC_t t; } t_a; - return sizeof(t_a) - sizeof(LZ4_streamHC_t); -#else - return 1; /* effectively disabled */ -#endif -} - -/* state is presumed correctly initialized, - * in which case its size and alignment have already been validate */ -int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel) -{ - LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse; - if (!LZ4_isAligned(state, LZ4_streamHC_t_alignment())) return 0; - LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel); - LZ4HC_init_internal (ctx, (const BYTE*)src); - if (dstCapacity < LZ4_compressBound(srcSize)) - return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, limitedOutput); - else - return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, notLimited); -} - -int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel) -{ - LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx)); - if (ctx==NULL) return 0; /* init failure */ - return LZ4_compress_HC_extStateHC_fastReset(state, src, dst, srcSize, dstCapacity, compressionLevel); -} - -int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel) -{ - int cSize; -#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 - LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t)); - if (statePtr==NULL) return 0; -#else - LZ4_streamHC_t state; - LZ4_streamHC_t* const statePtr = &state; -#endif - cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel); -#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 - FREEMEM(statePtr); -#endif - return cSize; -} - -/* state is presumed sized correctly (>= sizeof(LZ4_streamHC_t)) */ -int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel) -{ - LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx)); - if (ctx==NULL) return 0; /* init failure */ - LZ4HC_init_internal(&ctx->internal_donotuse, (const BYTE*) source); - LZ4_setCompressionLevel(ctx, cLevel); - return LZ4HC_compress_generic(&ctx->internal_donotuse, source, dest, sourceSizePtr, targetDestSize, cLevel, fillOutput); -} - - - -/************************************** -* Streaming Functions -**************************************/ -/* allocation */ -#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) -LZ4_streamHC_t* LZ4_createStreamHC(void) -{ - LZ4_streamHC_t* const state = - (LZ4_streamHC_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamHC_t)); - if (state == NULL) return NULL; - LZ4_setCompressionLevel(state, LZ4HC_CLEVEL_DEFAULT); - return state; -} - -int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr) -{ - DEBUGLOG(4, "LZ4_freeStreamHC(%p)", LZ4_streamHCPtr); - if (!LZ4_streamHCPtr) return 0; /* support free on NULL */ - FREEMEM(LZ4_streamHCPtr); - return 0; -} -#endif - - -LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size) -{ - LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)buffer; - DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size); - /* check conditions */ - if (buffer == NULL) return NULL; - if (size < sizeof(LZ4_streamHC_t)) return NULL; - if (!LZ4_isAligned(buffer, LZ4_streamHC_t_alignment())) return NULL; - /* init */ - { 
LZ4HC_CCtx_internal* const hcstate = &(LZ4_streamHCPtr->internal_donotuse); - MEM_INIT(hcstate, 0, sizeof(*hcstate)); } - LZ4_setCompressionLevel(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT); - return LZ4_streamHCPtr; -} - -/* just a stub */ -void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel) -{ - LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr)); - LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel); -} - -void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel) -{ - DEBUGLOG(4, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr, compressionLevel); - if (LZ4_streamHCPtr->internal_donotuse.dirty) { - LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr)); - } else { - /* preserve end - prefixStart : can trigger clearTable's threshold */ - if (LZ4_streamHCPtr->internal_donotuse.end != NULL) { - LZ4_streamHCPtr->internal_donotuse.end -= (uptrval)LZ4_streamHCPtr->internal_donotuse.prefixStart; - } else { - assert(LZ4_streamHCPtr->internal_donotuse.prefixStart == NULL); - } - LZ4_streamHCPtr->internal_donotuse.prefixStart = NULL; - LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL; - } - LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel); -} - -void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel) -{ - DEBUGLOG(5, "LZ4_setCompressionLevel(%p, %d)", LZ4_streamHCPtr, compressionLevel); - if (compressionLevel < 1) compressionLevel = LZ4HC_CLEVEL_DEFAULT; - if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX; - LZ4_streamHCPtr->internal_donotuse.compressionLevel = (short)compressionLevel; -} - -void LZ4_favorDecompressionSpeed(LZ4_streamHC_t* LZ4_streamHCPtr, int favor) -{ - LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = (favor!=0); -} - -/* LZ4_loadDictHC() : - * LZ4_streamHCPtr is presumed properly initialized */ -int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, - const char* dictionary, int dictSize) -{ - LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse; - DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d)", LZ4_streamHCPtr, dictionary, dictSize); - assert(LZ4_streamHCPtr != NULL); - if (dictSize > 64 KB) { - dictionary += (size_t)dictSize - 64 KB; - dictSize = 64 KB; - } - /* need a full initialization, there are bad side-effects when using resetFast() */ - { int const cLevel = ctxPtr->compressionLevel; - LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr)); - LZ4_setCompressionLevel(LZ4_streamHCPtr, cLevel); - } - LZ4HC_init_internal (ctxPtr, (const BYTE*)dictionary); - ctxPtr->end = (const BYTE*)dictionary + dictSize; - if (dictSize >= 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3); - return dictSize; -} - -void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC_t *dictionary_stream) { - working_stream->internal_donotuse.dictCtx = dictionary_stream != NULL ? 
&(dictionary_stream->internal_donotuse) : NULL; -} - -/* compression */ - -static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock) -{ - DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock); - if (ctxPtr->end >= ctxPtr->prefixStart + 4) - LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */ - - /* Only one memory segment for extDict, so any previous extDict is lost at this stage */ - ctxPtr->lowLimit = ctxPtr->dictLimit; - ctxPtr->dictStart = ctxPtr->prefixStart; - ctxPtr->dictLimit += (U32)(ctxPtr->end - ctxPtr->prefixStart); - ctxPtr->prefixStart = newBlock; - ctxPtr->end = newBlock; - ctxPtr->nextToUpdate = ctxPtr->dictLimit; /* match referencing will resume from there */ - - /* cannot reference an extDict and a dictCtx at the same time */ - ctxPtr->dictCtx = NULL; -} - -static int -LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr, - const char* src, char* dst, - int* srcSizePtr, int dstCapacity, - limitedOutput_directive limit) -{ - LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse; - DEBUGLOG(5, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)", - LZ4_streamHCPtr, src, *srcSizePtr, limit); - assert(ctxPtr != NULL); - /* auto-init if forgotten */ - if (ctxPtr->prefixStart == NULL) LZ4HC_init_internal (ctxPtr, (const BYTE*) src); - - /* Check overflow */ - if ((size_t)(ctxPtr->end - ctxPtr->prefixStart) + ctxPtr->dictLimit > 2 GB) { - size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->prefixStart); - if (dictSize > 64 KB) dictSize = 64 KB; - LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize); - } - - /* Check if blocks follow each other */ - if ((const BYTE*)src != ctxPtr->end) - LZ4HC_setExternalDict(ctxPtr, (const BYTE*)src); - - /* Check overlapping input/dictionary space */ - { const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr; - const BYTE* const dictBegin = ctxPtr->dictStart; - const BYTE* const dictEnd = ctxPtr->dictStart + (ctxPtr->dictLimit - ctxPtr->lowLimit); - if ((sourceEnd > dictBegin) && ((const BYTE*)src < dictEnd)) { - if (sourceEnd > dictEnd) sourceEnd = dictEnd; - ctxPtr->lowLimit += (U32)(sourceEnd - ctxPtr->dictStart); - ctxPtr->dictStart += (U32)(sourceEnd - ctxPtr->dictStart); - if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) { - ctxPtr->lowLimit = ctxPtr->dictLimit; - ctxPtr->dictStart = ctxPtr->prefixStart; - } } } - - return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit); -} - -int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int srcSize, int dstCapacity) -{ - if (dstCapacity < LZ4_compressBound(srcSize)) - return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, limitedOutput); - else - return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, notLimited); -} - -int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize) -{ - return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, srcSizePtr, targetDestSize, fillOutput); -} - - - -/* LZ4_saveDictHC : - * save history content - * into a user-provided buffer - * which is then used to continue compression - */ -int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize) -{ - LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse; - int const prefixSize = 
(int)(streamPtr->end - streamPtr->prefixStart); - DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize); - assert(prefixSize >= 0); - if (dictSize > 64 KB) dictSize = 64 KB; - if (dictSize < 4) dictSize = 0; - if (dictSize > prefixSize) dictSize = prefixSize; - if (safeBuffer == NULL) assert(dictSize == 0); - if (dictSize > 0) - LZ4_memmove(safeBuffer, streamPtr->end - dictSize, dictSize); - { U32 const endIndex = (U32)(streamPtr->end - streamPtr->prefixStart) + streamPtr->dictLimit; - streamPtr->end = (const BYTE*)safeBuffer + dictSize; - streamPtr->prefixStart = streamPtr->end - dictSize; - streamPtr->dictLimit = endIndex - (U32)dictSize; - streamPtr->lowLimit = endIndex - (U32)dictSize; - streamPtr->dictStart = streamPtr->prefixStart; - if (streamPtr->nextToUpdate < streamPtr->dictLimit) - streamPtr->nextToUpdate = streamPtr->dictLimit; - } - return dictSize; -} - - -/*************************************************** -* Deprecated Functions -***************************************************/ - -/* These functions currently generate deprecation warnings */ - -/* Wrappers for deprecated compression functions */ -int LZ4_compressHC(const char* src, char* dst, int srcSize) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), 0); } -int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0); } -int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); } -int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel); } -int LZ4_compressHC_withStateHC (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, LZ4_compressBound(srcSize), 0); } -int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, maxDstSize, 0); } -int LZ4_compressHC2_withStateHC (void* state, const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); } -int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel); } -int LZ4_compressHC_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, LZ4_compressBound(srcSize)); } -int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, maxDstSize); } - - -/* Deprecated streaming functions */ -int LZ4_sizeofStreamStateHC(void) { return sizeof(LZ4_streamHC_t); } - -/* state is presumed correctly sized, aka >= sizeof(LZ4_streamHC_t) - * @return : 0 on success, !=0 if error */ -int LZ4_resetStreamStateHC(void* state, char* inputBuffer) -{ - LZ4_streamHC_t* const hc4 = LZ4_initStreamHC(state, sizeof(*hc4)); - if (hc4 == NULL) return 1; /* init failed */ - LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer); - return 0; -} - -#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) -void* LZ4_createHC (const char* 
inputBuffer) -{ - LZ4_streamHC_t* const hc4 = LZ4_createStreamHC(); - if (hc4 == NULL) return NULL; /* not enough memory */ - LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer); - return hc4; -} - -int LZ4_freeHC (void* LZ4HC_Data) -{ - if (!LZ4HC_Data) return 0; /* support free on NULL */ - FREEMEM(LZ4HC_Data); - return 0; -} -#endif - -int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel) -{ - return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, 0, cLevel, notLimited); -} - -int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int dstCapacity, int cLevel) -{ - return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, dstCapacity, cLevel, limitedOutput); -} - -char* LZ4_slideInputBufferHC(void* LZ4HC_Data) -{ - LZ4_streamHC_t* const ctx = (LZ4_streamHC_t*)LZ4HC_Data; - const BYTE* bufferStart = ctx->internal_donotuse.prefixStart - ctx->internal_donotuse.dictLimit + ctx->internal_donotuse.lowLimit; - LZ4_resetStreamHC_fast(ctx, ctx->internal_donotuse.compressionLevel); - /* avoid const char * -> char * conversion warning :( */ - return (char*)(uptrval)bufferStart; -} - - -/* ================================================ - * LZ4 Optimal parser (levels [LZ4HC_CLEVEL_OPT_MIN - LZ4HC_CLEVEL_MAX]) - * ===============================================*/ -typedef struct { - int price; - int off; - int mlen; - int litlen; -} LZ4HC_optimal_t; - -/* price in bytes */ -LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen) -{ - int price = litlen; - assert(litlen >= 0); - if (litlen >= (int)RUN_MASK) - price += 1 + ((litlen-(int)RUN_MASK) / 255); - return price; -} - - -/* requires mlen >= MINMATCH */ -LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen) -{ - int price = 1 + 2 ; /* token + 16-bit offset */ - assert(litlen >= 0); - assert(mlen >= MINMATCH); - - price += LZ4HC_literalsPrice(litlen); - - if (mlen >= (int)(ML_MASK+MINMATCH)) - price += 1 + ((mlen-(int)(ML_MASK+MINMATCH)) / 255); - - return price; -} - - -typedef struct { - int off; - int len; -} LZ4HC_match_t; - -LZ4_FORCE_INLINE LZ4HC_match_t -LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx, - const BYTE* ip, const BYTE* const iHighLimit, - int minLen, int nbSearches, - const dictCtx_directive dict, - const HCfavor_e favorDecSpeed) -{ - LZ4HC_match_t match = { 0 , 0 }; - const BYTE* matchPtr = NULL; - /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos), - * but this won't be the case here, as we define iLowLimit==ip, - * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */ - int matchLength = LZ4HC_InsertAndGetWiderMatch(ctx, ip, ip, iHighLimit, minLen, &matchPtr, &ip, nbSearches, 1 /*patternAnalysis*/, 1 /*chainSwap*/, dict, favorDecSpeed); - if (matchLength <= minLen) return match; - if (favorDecSpeed) { - if ((matchLength>18) & (matchLength<=36)) matchLength=18; /* favor shortcut */ - } - match.len = matchLength; - match.off = (int)(ip-matchPtr); - return match; -} - - -static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx, - const char* const source, - char* dst, - int* srcSizePtr, - int dstCapacity, - int const nbSearches, - size_t sufficient_len, - const limitedOutput_directive limit, - int const fullUpdate, - const dictCtx_directive dict, - const HCfavor_e favorDecSpeed) -{ - int retval = 0; -#define TRAILING_LITERALS 3 -#if 
defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 - LZ4HC_optimal_t* const opt = (LZ4HC_optimal_t*)ALLOC(sizeof(LZ4HC_optimal_t) * (LZ4_OPT_NUM + TRAILING_LITERALS)); -#else - LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS]; /* ~64 KB, which is a bit large for stack... */ -#endif - - const BYTE* ip = (const BYTE*) source; - const BYTE* anchor = ip; - const BYTE* const iend = ip + *srcSizePtr; - const BYTE* const mflimit = iend - MFLIMIT; - const BYTE* const matchlimit = iend - LASTLITERALS; - BYTE* op = (BYTE*) dst; - BYTE* opSaved = (BYTE*) dst; - BYTE* oend = op + dstCapacity; - int ovml = MINMATCH; /* overflow - last sequence */ - const BYTE* ovref = NULL; - - /* init */ -#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 - if (opt == NULL) goto _return_label; -#endif - DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity); - *srcSizePtr = 0; - if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */ - if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1; - - /* Main Loop */ - while (ip <= mflimit) { - int const llen = (int)(ip - anchor); - int best_mlen, best_off; - int cur, last_match_pos = 0; - - LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed); - if (firstMatch.len==0) { ip++; continue; } - - if ((size_t)firstMatch.len > sufficient_len) { - /* good enough solution : immediate encoding */ - int const firstML = firstMatch.len; - const BYTE* const matchPos = ip - firstMatch.off; - opSaved = op; - if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) ) { /* updates ip, op and anchor */ - ovml = firstML; - ovref = matchPos; - goto _dest_overflow; - } - continue; - } - - /* set prices for first positions (literals) */ - { int rPos; - for (rPos = 0 ; rPos < MINMATCH ; rPos++) { - int const cost = LZ4HC_literalsPrice(llen + rPos); - opt[rPos].mlen = 1; - opt[rPos].off = 0; - opt[rPos].litlen = llen + rPos; - opt[rPos].price = cost; - DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup", - rPos, cost, opt[rPos].litlen); - } } - /* set prices using initial match */ - { int mlen = MINMATCH; - int const matchML = firstMatch.len; /* necessarily < sufficient_len < LZ4_OPT_NUM */ - int const offset = firstMatch.off; - assert(matchML < LZ4_OPT_NUM); - for ( ; mlen <= matchML ; mlen++) { - int const cost = LZ4HC_sequencePrice(llen, mlen); - opt[mlen].mlen = mlen; - opt[mlen].off = offset; - opt[mlen].litlen = llen; - opt[mlen].price = cost; - DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i) -- initial setup", - mlen, cost, mlen); - } } - last_match_pos = firstMatch.len; - { int addLit; - for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) { - opt[last_match_pos+addLit].mlen = 1; /* literal */ - opt[last_match_pos+addLit].off = 0; - opt[last_match_pos+addLit].litlen = addLit; - opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit); - DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup", - last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit); - } } - - /* check further positions */ - for (cur = 1; cur < last_match_pos; cur++) { - const BYTE* const curPtr = ip + cur; - LZ4HC_match_t newMatch; - - if (curPtr > mflimit) break; - DEBUGLOG(7, "rPos:%u[%u] vs [%u]%u", - cur, opt[cur].price, opt[cur+1].price, cur+1); - if (fullUpdate) { - /* not useful to search here if next position has same (or lower) cost */ - if ( (opt[cur+1].price <= 
opt[cur].price) - /* in some cases, next position has same cost, but cost rises sharply after, so a small match would still be beneficial */ - && (opt[cur+MINMATCH].price < opt[cur].price + 3/*min seq price*/) ) - continue; - } else { - /* not useful to search here if next position has same (or lower) cost */ - if (opt[cur+1].price <= opt[cur].price) continue; - } - - DEBUGLOG(7, "search at rPos:%u", cur); - if (fullUpdate) - newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed); - else - /* only test matches of minimum length; slightly faster, but misses a few bytes */ - newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, last_match_pos - cur, nbSearches, dict, favorDecSpeed); - if (!newMatch.len) continue; - - if ( ((size_t)newMatch.len > sufficient_len) - || (newMatch.len + cur >= LZ4_OPT_NUM) ) { - /* immediate encoding */ - best_mlen = newMatch.len; - best_off = newMatch.off; - last_match_pos = cur + 1; - goto encode; - } - - /* before match : set price with literals at beginning */ - { int const baseLitlen = opt[cur].litlen; - int litlen; - for (litlen = 1; litlen < MINMATCH; litlen++) { - int const price = opt[cur].price - LZ4HC_literalsPrice(baseLitlen) + LZ4HC_literalsPrice(baseLitlen+litlen); - int const pos = cur + litlen; - if (price < opt[pos].price) { - opt[pos].mlen = 1; /* literal */ - opt[pos].off = 0; - opt[pos].litlen = baseLitlen+litlen; - opt[pos].price = price; - DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", - pos, price, opt[pos].litlen); - } } } - - /* set prices using match at position = cur */ - { int const matchML = newMatch.len; - int ml = MINMATCH; - - assert(cur + newMatch.len < LZ4_OPT_NUM); - for ( ; ml <= matchML ; ml++) { - int const pos = cur + ml; - int const offset = newMatch.off; - int price; - int ll; - DEBUGLOG(7, "testing price rPos %i (last_match_pos=%i)", - pos, last_match_pos); - if (opt[cur].mlen == 1) { - ll = opt[cur].litlen; - price = ((cur > ll) ? 
opt[cur - ll].price : 0) - + LZ4HC_sequencePrice(ll, ml); - } else { - ll = 0; - price = opt[cur].price + LZ4HC_sequencePrice(0, ml); - } - - assert((U32)favorDecSpeed <= 1); - if (pos > last_match_pos+TRAILING_LITERALS - || price <= opt[pos].price - (int)favorDecSpeed) { - DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i)", - pos, price, ml); - assert(pos < LZ4_OPT_NUM); - if ( (ml == matchML) /* last pos of last match */ - && (last_match_pos < pos) ) - last_match_pos = pos; - opt[pos].mlen = ml; - opt[pos].off = offset; - opt[pos].litlen = ll; - opt[pos].price = price; - } } } - /* complete following positions with literals */ - { int addLit; - for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) { - opt[last_match_pos+addLit].mlen = 1; /* literal */ - opt[last_match_pos+addLit].off = 0; - opt[last_match_pos+addLit].litlen = addLit; - opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit); - DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit); - } } - } /* for (cur = 1; cur <= last_match_pos; cur++) */ - - assert(last_match_pos < LZ4_OPT_NUM + TRAILING_LITERALS); - best_mlen = opt[last_match_pos].mlen; - best_off = opt[last_match_pos].off; - cur = last_match_pos - best_mlen; - -encode: /* cur, last_match_pos, best_mlen, best_off must be set */ - assert(cur < LZ4_OPT_NUM); - assert(last_match_pos >= 1); /* == 1 when only one candidate */ - DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos); - { int candidate_pos = cur; - int selected_matchLength = best_mlen; - int selected_offset = best_off; - while (1) { /* from end to beginning */ - int const next_matchLength = opt[candidate_pos].mlen; /* can be 1, means literal */ - int const next_offset = opt[candidate_pos].off; - DEBUGLOG(7, "pos %i: sequence length %i", candidate_pos, selected_matchLength); - opt[candidate_pos].mlen = selected_matchLength; - opt[candidate_pos].off = selected_offset; - selected_matchLength = next_matchLength; - selected_offset = next_offset; - if (next_matchLength > candidate_pos) break; /* last match elected, first match to encode */ - assert(next_matchLength > 0); /* can be 1, means literal */ - candidate_pos -= next_matchLength; - } } - - /* encode all recorded sequences in order */ - { int rPos = 0; /* relative position (to ip) */ - while (rPos < last_match_pos) { - int const ml = opt[rPos].mlen; - int const offset = opt[rPos].off; - if (ml == 1) { ip++; rPos++; continue; } /* literal; note: can end up with several literals, in which case, skip them */ - rPos += ml; - assert(ml >= MINMATCH); - assert((offset >= 1) && (offset <= LZ4_DISTANCE_MAX)); - opSaved = op; - if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) ) { /* updates ip, op and anchor */ - ovml = ml; - ovref = ip - offset; - goto _dest_overflow; - } } } - } /* while (ip <= mflimit) */ - -_last_literals: - /* Encode Last Literals */ - { size_t lastRunSize = (size_t)(iend - anchor); /* literals */ - size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255; - size_t const totalSize = 1 + llAdd + lastRunSize; - if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */ - if (limit && (op + totalSize > oend)) { - if (limit == limitedOutput) { /* Check output limit */ - retval = 0; - goto _return_label; - } - /* adapt lastRunSize to fill 'dst' */ - lastRunSize = (size_t)(oend - op) - 1 /*token*/; - llAdd = (lastRunSize + 256 - RUN_MASK) / 256; - lastRunSize -= llAdd; - } 
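The two price helpers defined earlier in this optimal parser, LZ4HC_literalsPrice() and LZ4HC_sequencePrice(), estimate output cost in bytes: one token, two offset bytes, the literals themselves, plus one extra length byte per 255 once the literal run reaches RUN_MASK (15) or the match length reaches ML_MASK + MINMATCH (19). A minimal standalone sketch of that arithmetic, restating the formulas with the constants written out (this restatement is illustrative and not part of the deleted file):

    #include <stdio.h>

    /* Restatement of the price model above, with RUN_MASK=15, ML_MASK=15, MINMATCH=4. */
    static int literals_price(int litlen)
    {
        int price = litlen;                                    /* literals are emitted verbatim */
        if (litlen >= 15) price += 1 + (litlen - 15) / 255;    /* extra literal-length byte(s) */
        return price;
    }

    static int sequence_price(int litlen, int mlen)
    {
        int price = 1 + 2;                                     /* token + 16-bit offset */
        price += literals_price(litlen);
        if (mlen >= 15 + 4) price += 1 + (mlen - 19) / 255;    /* extra match-length byte(s) */
        return price;
    }

    int main(void)
    {
        /* 20 literals followed by a 70-byte match. */
        printf("%d\n", sequence_price(20, 70));                /* prints 25 */
        return 0;
    }

For 20 literals followed by a 70-byte match this evaluates to 1 token + 1 extra literal-length byte + 20 literals + 2 offset bytes + 1 extra match-length byte = 25 bytes, which is the quantity the parser minimises over candidate parsings.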
- DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize); - ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */ - - if (lastRunSize >= RUN_MASK) { - size_t accumulator = lastRunSize - RUN_MASK; - *op++ = (RUN_MASK << ML_BITS); - for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255; - *op++ = (BYTE) accumulator; - } else { - *op++ = (BYTE)(lastRunSize << ML_BITS); - } - LZ4_memcpy(op, anchor, lastRunSize); - op += lastRunSize; - } - - /* End */ - *srcSizePtr = (int) (((const char*)ip) - source); - retval = (int) ((char*)op-dst); - goto _return_label; - -_dest_overflow: -if (limit == fillOutput) { - /* Assumption : ip, anchor, ovml and ovref must be set correctly */ - size_t const ll = (size_t)(ip - anchor); - size_t const ll_addbytes = (ll + 240) / 255; - size_t const ll_totalCost = 1 + ll_addbytes + ll; - BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */ - DEBUGLOG(6, "Last sequence overflowing (only %i bytes remaining)", (int)(oend-1-opSaved)); - op = opSaved; /* restore correct out pointer */ - if (op + ll_totalCost <= maxLitPos) { - /* ll validated; now adjust match length */ - size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost)); - size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255); - assert(maxMlSize < INT_MAX); assert(ovml >= 0); - if ((size_t)ovml > maxMlSize) ovml = (int)maxMlSize; - if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) { - DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml); - DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor); - LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovref, notLimited, oend); - DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor); - } } - goto _last_literals; -} -_return_label: -#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 - FREEMEM(opt); -#endif - return retval; -} diff --git a/librocksdb-sys/lz4/lib/lz4hc.h b/librocksdb-sys/lz4/lib/lz4hc.h deleted file mode 100644 index e937acf..0000000 --- a/librocksdb-sys/lz4/lib/lz4hc.h +++ /dev/null @@ -1,413 +0,0 @@ -/* - LZ4 HC - High Compression Mode of LZ4 - Header File - Copyright (C) 2011-2020, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ -#ifndef LZ4_HC_H_19834876238432 -#define LZ4_HC_H_19834876238432 - -#if defined (__cplusplus) -extern "C" { -#endif - -/* --- Dependency --- */ -/* note : lz4hc requires lz4.h/lz4.c for compilation */ -#include "lz4.h" /* stddef, LZ4LIB_API, LZ4_DEPRECATED */ - - -/* --- Useful constants --- */ -#define LZ4HC_CLEVEL_MIN 3 -#define LZ4HC_CLEVEL_DEFAULT 9 -#define LZ4HC_CLEVEL_OPT_MIN 10 -#define LZ4HC_CLEVEL_MAX 12 - - -/*-************************************ - * Block Compression - **************************************/ -/*! LZ4_compress_HC() : - * Compress data from `src` into `dst`, using the powerful but slower "HC" algorithm. - * `dst` must be already allocated. - * Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h") - * Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h") - * `compressionLevel` : any value between 1 and LZ4HC_CLEVEL_MAX will work. - * Values > LZ4HC_CLEVEL_MAX behave the same as LZ4HC_CLEVEL_MAX. - * @return : the number of bytes written into 'dst' - * or 0 if compression fails. - */ -LZ4LIB_API int LZ4_compress_HC (const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel); - - -/* Note : - * Decompression functions are provided within "lz4.h" (BSD license) - */ - - -/*! LZ4_compress_HC_extStateHC() : - * Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`. - * `state` size is provided by LZ4_sizeofStateHC(). - * Memory segment must be aligned on 8-bytes boundaries (which a normal malloc() should do properly). - */ -LZ4LIB_API int LZ4_sizeofStateHC(void); -LZ4LIB_API int LZ4_compress_HC_extStateHC(void* stateHC, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel); - - -/*! LZ4_compress_HC_destSize() : v1.9.0+ - * Will compress as much data as possible from `src` - * to fit into `targetDstSize` budget. - * Result is provided in 2 parts : - * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize) - * or 0 if compression fails. - * `srcSizePtr` : on success, *srcSizePtr is updated to indicate how much bytes were read from `src` - */ -LZ4LIB_API int LZ4_compress_HC_destSize(void* stateHC, - const char* src, char* dst, - int* srcSizePtr, int targetDstSize, - int compressionLevel); - - -/*-************************************ - * Streaming Compression - * Bufferless synchronous API - **************************************/ - typedef union LZ4_streamHC_u LZ4_streamHC_t; /* incomplete type (defined later) */ - -/*! LZ4_createStreamHC() and LZ4_freeStreamHC() : - * These functions create and release memory for LZ4 HC streaming state. - * Newly created states are automatically initialized. - * A same state can be used multiple times consecutively, - * starting with LZ4_resetStreamHC_fast() to start a new stream of blocks. 
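As a usage illustration of LZ4_compress_HC() documented above, here is a minimal sketch (the sample data and buffer sizes are invented for the example); sizing dst via LZ4_compressBound() is what makes compression guaranteed to succeed:

    #include <stdio.h>
    #include <string.h>
    #include "lz4.h"    /* LZ4_compressBound, LZ4_decompress_safe */
    #include "lz4hc.h"  /* LZ4_compress_HC, LZ4HC_CLEVEL_MAX */

    int main(void)
    {
        const char src[] = "yada yada yada yada yada yada yada yada";
        int const srcSize = (int)sizeof(src);
        int const bound = LZ4_compressBound(srcSize);      /* worst-case compressed size */
        char dst[256];
        if (bound > (int)sizeof(dst)) return 1;

        int const cSize = LZ4_compress_HC(src, dst, srcSize, bound, LZ4HC_CLEVEL_MAX);
        if (cSize <= 0) return 1;                          /* 0 means compression failed */

        char round[sizeof(src)];
        int const dSize = LZ4_decompress_safe(dst, round, cSize, (int)sizeof(round));
        printf("compressed %d -> %d bytes, roundtrip %s\n",
               srcSize, cSize,
               (dSize == srcSize && memcmp(src, round, sizeof(src)) == 0) ? "ok" : "FAILED");
        return 0;
    }

Decompression goes through the regular LZ4_decompress_safe() from lz4.h, since HC only changes the compressor, not the block format.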
- */ -LZ4LIB_API LZ4_streamHC_t* LZ4_createStreamHC(void); -LZ4LIB_API int LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr); - -/* - These functions compress data in successive blocks of any size, - using previous blocks as dictionary, to improve compression ratio. - One key assumption is that previous blocks (up to 64 KB) remain read-accessible while compressing next blocks. - There is an exception for ring buffers, which can be smaller than 64 KB. - Ring-buffer scenario is automatically detected and handled within LZ4_compress_HC_continue(). - - Before starting compression, state must be allocated and properly initialized. - LZ4_createStreamHC() does both, though compression level is set to LZ4HC_CLEVEL_DEFAULT. - - Selecting the compression level can be done with LZ4_resetStreamHC_fast() (starts a new stream) - or LZ4_setCompressionLevel() (anytime, between blocks in the same stream) (experimental). - LZ4_resetStreamHC_fast() only works on states which have been properly initialized at least once, - which is automatically the case when state is created using LZ4_createStreamHC(). - - After reset, a first "fictional block" can be designated as initial dictionary, - using LZ4_loadDictHC() (Optional). - - Invoke LZ4_compress_HC_continue() to compress each successive block. - The number of blocks is unlimited. - Previous input blocks, including initial dictionary when present, - must remain accessible and unmodified during compression. - - It's allowed to update compression level anytime between blocks, - using LZ4_setCompressionLevel() (experimental). - - 'dst' buffer should be sized to handle worst case scenarios - (see LZ4_compressBound(), it ensures compression success). - In case of failure, the API does not guarantee recovery, - so the state _must_ be reset. - To ensure compression success - whenever `dst` buffer size cannot be made >= LZ4_compressBound(), - consider using LZ4_compress_HC_continue_destSize(). - - Whenever previous input blocks can't be preserved unmodified in-place during compression of next blocks, - it's possible to copy the last blocks into a more stable memory space, using LZ4_saveDictHC(). - Return value of LZ4_saveDictHC() is the size of dictionary effectively saved into 'safeBuffer' (<= 64 KB) - - After completing a streaming compression, - it's possible to start a new stream of blocks, using the same LZ4_streamHC_t state, - just by resetting it, using LZ4_resetStreamHC_fast(). -*/ - -LZ4LIB_API void LZ4_resetStreamHC_fast(LZ4_streamHC_t* streamHCPtr, int compressionLevel); /* v1.9.0+ */ -LZ4LIB_API int LZ4_loadDictHC (LZ4_streamHC_t* streamHCPtr, const char* dictionary, int dictSize); - -LZ4LIB_API int LZ4_compress_HC_continue (LZ4_streamHC_t* streamHCPtr, - const char* src, char* dst, - int srcSize, int maxDstSize); - -/*! LZ4_compress_HC_continue_destSize() : v1.9.0+ - * Similar to LZ4_compress_HC_continue(), - * but will read as much data as possible from `src` - * to fit into `targetDstSize` budget. - * Result is provided into 2 parts : - * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize) - * or 0 if compression fails. - * `srcSizePtr` : on success, *srcSizePtr will be updated to indicate how much bytes were read from `src`. - * Note that this function may not consume the entire input. 
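A minimal sketch of the streaming workflow described above: two dependent blocks compressed with LZ4_compress_HC_continue(), the second reusing the first as history (block contents and buffer sizes are illustrative). Decompression would mirror this with LZ4_decompress_safe_continue() from lz4.h, in the same block order:

    #include <stdio.h>
    #include "lz4hc.h"

    int main(void)
    {
        const char block1[] = "first block of data, first block of data";
        const char block2[] = "first block of data, second block of data";
        char dst1[256], dst2[256];

        LZ4_streamHC_t* const stream = LZ4_createStreamHC();
        if (stream == NULL) return 1;
        LZ4_resetStreamHC_fast(stream, LZ4HC_CLEVEL_DEFAULT);   /* start a new stream */

        /* block1 must stay readable and unmodified while block2 is being compressed */
        int const c1 = LZ4_compress_HC_continue(stream, block1, dst1,
                                                (int)sizeof(block1), (int)sizeof(dst1));
        int const c2 = LZ4_compress_HC_continue(stream, block2, dst2,
                                                (int)sizeof(block2), (int)sizeof(dst2));
        printf("block1: %d -> %d bytes, block2: %d -> %d bytes\n",
               (int)sizeof(block1), c1, (int)sizeof(block2), c2);

        LZ4_freeStreamHC(stream);
        return (c1 > 0 && c2 > 0) ? 0 : 1;
    }

If the input buffers cannot be kept alive between calls, LZ4_saveDictHC() can first copy the last <= 64 KB of history into a stable buffer, as the comment above notes.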
- */ -LZ4LIB_API int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr, - const char* src, char* dst, - int* srcSizePtr, int targetDstSize); - -LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, int maxDictSize); - - - -/*^********************************************** - * !!!!!! STATIC LINKING ONLY !!!!!! - ***********************************************/ - -/*-****************************************************************** - * PRIVATE DEFINITIONS : - * Do not use these definitions directly. - * They are merely exposed to allow static allocation of `LZ4_streamHC_t`. - * Declare an `LZ4_streamHC_t` directly, rather than any type below. - * Even then, only do so in the context of static linking, as definitions may change between versions. - ********************************************************************/ - -#define LZ4HC_DICTIONARY_LOGSIZE 16 -#define LZ4HC_MAXD (1<= LZ4HC_CLEVEL_OPT_MIN. - */ -LZ4LIB_STATIC_API void LZ4_favorDecompressionSpeed( - LZ4_streamHC_t* LZ4_streamHCPtr, int favor); - -/*! LZ4_resetStreamHC_fast() : v1.9.0+ - * When an LZ4_streamHC_t is known to be in a internally coherent state, - * it can often be prepared for a new compression with almost no work, only - * sometimes falling back to the full, expensive reset that is always required - * when the stream is in an indeterminate state (i.e., the reset performed by - * LZ4_resetStreamHC()). - * - * LZ4_streamHCs are guaranteed to be in a valid state when: - * - returned from LZ4_createStreamHC() - * - reset by LZ4_resetStreamHC() - * - memset(stream, 0, sizeof(LZ4_streamHC_t)) - * - the stream was in a valid state and was reset by LZ4_resetStreamHC_fast() - * - the stream was in a valid state and was then used in any compression call - * that returned success - * - the stream was in an indeterminate state and was used in a compression - * call that fully reset the state (LZ4_compress_HC_extStateHC()) and that - * returned success - * - * Note: - * A stream that was last used in a compression call that returned an error - * may be passed to this function. However, it will be fully reset, which will - * clear any existing history and settings from the context. - */ -LZ4LIB_STATIC_API void LZ4_resetStreamHC_fast( - LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel); - -/*! LZ4_compress_HC_extStateHC_fastReset() : - * A variant of LZ4_compress_HC_extStateHC(). - * - * Using this variant avoids an expensive initialization step. It is only safe - * to call if the state buffer is known to be correctly initialized already - * (see above comment on LZ4_resetStreamHC_fast() for a definition of - * "correctly initialized"). From a high level, the difference is that this - * function initializes the provided state with a call to - * LZ4_resetStreamHC_fast() while LZ4_compress_HC_extStateHC() starts with a - * call to LZ4_resetStreamHC(). - */ -LZ4LIB_STATIC_API int LZ4_compress_HC_extStateHC_fastReset ( - void* state, - const char* src, char* dst, - int srcSize, int dstCapacity, - int compressionLevel); - -/*! LZ4_attach_HC_dictionary() : - * This is an experimental API that allows for the efficient use of a - * static dictionary many times. - * - * Rather than re-loading the dictionary buffer into a working context before - * each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a - * working LZ4_streamHC_t, this function introduces a no-copy setup mechanism, - * in which the working stream references the dictionary stream in-place. 
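Before the dictionary-stream notes continue below, a small sketch of the externally-allocated-state variant mentioned earlier (LZ4_sizeofStateHC() / LZ4_compress_HC_extStateHC()); malloc() satisfies the 8-byte alignment requirement, and the sample data is illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include "lz4hc.h"

    /* Compress with a caller-owned state buffer instead of LZ4_createStreamHC(). */
    int main(void)
    {
        const char src[] = "state buffer example, state buffer example";
        char dst[256];

        void* const state = malloc((size_t)LZ4_sizeofStateHC());  /* 8-byte aligned by malloc */
        if (state == NULL) return 1;

        int const cSize = LZ4_compress_HC_extStateHC(state, src, dst,
                                                     (int)sizeof(src), (int)sizeof(dst),
                                                     LZ4HC_CLEVEL_DEFAULT);
        printf("compressed %d -> %d bytes\n", (int)sizeof(src), cSize);

        free(state);
        return (cSize > 0) ? 0 : 1;
    }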
- * - * Several assumptions are made about the state of the dictionary stream. - * Currently, only streams which have been prepared by LZ4_loadDictHC() should - * be expected to work. - * - * Alternatively, the provided dictionary stream pointer may be NULL, in which - * case any existing dictionary stream is unset. - * - * A dictionary should only be attached to a stream without any history (i.e., - * a stream that has just been reset). - * - * The dictionary will remain attached to the working stream only for the - * current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the - * dictionary context association from the working stream. The dictionary - * stream (and source buffer) must remain in-place / accessible / unchanged - * through the lifetime of the stream session. - */ -LZ4LIB_STATIC_API void LZ4_attach_HC_dictionary( - LZ4_streamHC_t *working_stream, - const LZ4_streamHC_t *dictionary_stream); - -#if defined (__cplusplus) -} -#endif - -#endif /* LZ4_HC_SLO_098092834 */ -#endif /* LZ4_HC_STATIC_LINKING_ONLY */ diff --git a/librocksdb-sys/lz4/lib/xxhash.c b/librocksdb-sys/lz4/lib/xxhash.c deleted file mode 100644 index ff28749..0000000 --- a/librocksdb-sys/lz4/lib/xxhash.c +++ /dev/null @@ -1,1030 +0,0 @@ -/* -* xxHash - Fast Hash algorithm -* Copyright (C) 2012-2016, Yann Collet -* -* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are -* met: -* -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above -* copyright notice, this list of conditions and the following disclaimer -* in the documentation and/or other materials provided with the -* distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -* -* You can contact the author at : -* - xxHash homepage: http://www.xxhash.com -* - xxHash source repository : https://github.com/Cyan4973/xxHash -*/ - - -/* ************************************* -* Tuning parameters -***************************************/ -/*!XXH_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. 
- * Method 2 : direct access. This method doesn't depend on compiler but violates the C standard. - * It can generate buggy code on targets which do not support unaligned memory accesses. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://stackoverflow.com/a/32095106/646947 for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ - || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \ - || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) -# define XXH_FORCE_MEMORY_ACCESS 2 -# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ - (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ - || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ - || defined(__ARM_ARCH_7S__) )) -# define XXH_FORCE_MEMORY_ACCESS 1 -# endif -#endif - -/*!XXH_ACCEPT_NULL_INPUT_POINTER : - * If input pointer is NULL, xxHash default behavior is to dereference it, triggering a segfault. - * When this macro is enabled, xxHash actively checks input for null pointer. - * If it is, result for null input pointers is the same as a null-length input. - */ -#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */ -# define XXH_ACCEPT_NULL_INPUT_POINTER 0 -#endif - -/*!XXH_FORCE_NATIVE_FORMAT : - * By default, xxHash library provides endian-independent Hash values, based on little-endian convention. - * Results are therefore identical for little-endian and big-endian CPU. - * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. - * Should endian-independence be of no importance for your application, you may set the #define below to 1, - * to improve speed for Big-endian CPU. - * This option has no impact on Little_Endian CPU. - */ -#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ -# define XXH_FORCE_NATIVE_FORMAT 0 -#endif - -/*!XXH_FORCE_ALIGN_CHECK : - * This is a minor performance trick, only useful with lots of very small keys. - * It means : check for aligned/unaligned input. - * The check costs one initial branch per hash; - * set it to 0 when the input is guaranteed to be aligned, - * or when alignment doesn't matter for performance. - */ -#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ -# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) -# define XXH_FORCE_ALIGN_CHECK 0 -# else -# define XXH_FORCE_ALIGN_CHECK 1 -# endif -#endif - - -/* ************************************* -* Includes & Memory related functions -***************************************/ -/*! Modify the local functions below should you wish to use some other memory routines -* for malloc(), free() */ -#include <stdlib.h> -static void* XXH_malloc(size_t s) { return malloc(s); } -static void XXH_free (void* p) { free(p); } -/*! 
and for memcpy() */ -#include <string.h> -static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } - -#include <assert.h> /* assert */ - -#define XXH_STATIC_LINKING_ONLY -#include "xxhash.h" - - -/* ************************************* -* Compiler Specific Options -***************************************/ -#ifdef _MSC_VER /* Visual Studio */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -# define FORCE_INLINE static __forceinline -#else -# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ -# ifdef __GNUC__ -# define FORCE_INLINE static inline __attribute__((always_inline)) -# else -# define FORCE_INLINE static inline -# endif -# else -# define FORCE_INLINE static -# endif /* __STDC_VERSION__ */ -#endif - - -/* ************************************* -* Basic Types -***************************************/ -#ifndef MEM_MODULE -# if !defined (__VMS) \ - && (defined (__cplusplus) \ - || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include <stdint.h> - typedef uint8_t BYTE; - typedef uint16_t U16; - typedef uint32_t U32; -# else - typedef unsigned char BYTE; - typedef unsigned short U16; - typedef unsigned int U32; -# endif -#endif - -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) - -/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ -static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; } - -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U32 u32; } __attribute__((packed)) unalign; -static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } - -#else - -/* portable and safe solution. Generally efficient. 
- * see : http://stackoverflow.com/a/32095106/646947 - */ -static U32 XXH_read32(const void* memPtr) -{ - U32 val; - memcpy(&val, memPtr, sizeof(val)); - return val; -} - -#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ - - -/* **************************************** -* Compiler-specific Functions and Macros -******************************************/ -#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) - -/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ -#if defined(_MSC_VER) -# define XXH_rotl32(x,r) _rotl(x,r) -# define XXH_rotl64(x,r) _rotl64(x,r) -#else -# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) -# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) -#endif - -#if defined(_MSC_VER) /* Visual Studio */ -# define XXH_swap32 _byteswap_ulong -#elif XXH_GCC_VERSION >= 403 -# define XXH_swap32 __builtin_bswap32 -#else -static U32 XXH_swap32 (U32 x) -{ - return ((x << 24) & 0xff000000 ) | - ((x << 8) & 0x00ff0000 ) | - ((x >> 8) & 0x0000ff00 ) | - ((x >> 24) & 0x000000ff ); -} -#endif - - -/* ************************************* -* Architecture Macros -***************************************/ -typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; - -/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ -#ifndef XXH_CPU_LITTLE_ENDIAN -static int XXH_isLittleEndian(void) -{ - const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ - return one.c[0]; -} -# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() -#endif - - -/* *************************** -* Memory reads -*****************************/ -typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; - -FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) -{ - if (align==XXH_unaligned) - return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); - else - return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); -} - -FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) -{ - return XXH_readLE32_align(ptr, endian, XXH_unaligned); -} - -static U32 XXH_readBE32(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); -} - - -/* ************************************* -* Macros -***************************************/ -#define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; } /* use after variable declarations */ -XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } - - -/* ******************************************************************* -* 32-bit hash functions -*********************************************************************/ -static const U32 PRIME32_1 = 2654435761U; -static const U32 PRIME32_2 = 2246822519U; -static const U32 PRIME32_3 = 3266489917U; -static const U32 PRIME32_4 = 668265263U; -static const U32 PRIME32_5 = 374761393U; - -static U32 XXH32_round(U32 seed, U32 input) -{ - seed += input * PRIME32_2; - seed = XXH_rotl32(seed, 13); - seed *= PRIME32_1; - return seed; -} - -/* mix all bits */ -static U32 XXH32_avalanche(U32 h32) -{ - h32 ^= h32 >> 15; - h32 *= PRIME32_2; - h32 ^= h32 >> 13; - h32 *= PRIME32_3; - h32 ^= h32 >> 16; - return(h32); -} - -#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) - -static U32 -XXH32_finalize(U32 h32, const void* ptr, size_t len, - XXH_endianess endian, XXH_alignment align) - -{ - const BYTE* p = (const BYTE*)ptr; - -#define PROCESS1 \ - h32 += (*p++) * PRIME32_5; \ - h32 = XXH_rotl32(h32, 11) * PRIME32_1 ; - -#define PROCESS4 \ - h32 += XXH_get32bits(p) * PRIME32_3; \ - p+=4; \ - h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; - - switch(len&15) /* or switch(bEnd - p) */ - { - case 12: PROCESS4; - /* fallthrough */ - case 8: PROCESS4; - /* fallthrough */ - case 4: PROCESS4; - return XXH32_avalanche(h32); - - case 13: PROCESS4; - /* fallthrough */ - case 9: PROCESS4; - /* fallthrough */ - case 5: PROCESS4; - PROCESS1; - return XXH32_avalanche(h32); - - case 14: PROCESS4; - /* fallthrough */ - case 10: PROCESS4; - /* fallthrough */ - case 6: PROCESS4; - PROCESS1; - PROCESS1; - return XXH32_avalanche(h32); - - case 15: PROCESS4; - /* fallthrough */ - case 11: PROCESS4; - /* fallthrough */ - case 7: PROCESS4; - /* fallthrough */ - case 3: PROCESS1; - /* fallthrough */ - case 2: PROCESS1; - /* fallthrough */ - case 1: PROCESS1; - /* fallthrough */ - case 0: return XXH32_avalanche(h32); - } - assert(0); - return h32; /* reaching this point is deemed impossible */ -} - - -FORCE_INLINE U32 -XXH32_endian_align(const void* input, size_t len, U32 seed, - XXH_endianess endian, XXH_alignment align) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* bEnd = p + len; - U32 h32; - -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) - if (p==NULL) { - len=0; - bEnd=p=(const BYTE*)(size_t)16; - } -#endif - - if (len>=16) { - const BYTE* const limit = bEnd - 15; - U32 v1 = seed + PRIME32_1 + PRIME32_2; - U32 v2 = seed + PRIME32_2; - U32 v3 = seed + 0; - U32 v4 = seed - PRIME32_1; - - do { - v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4; - v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; - v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; - v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; - } while (p < limit); - - h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) - + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); - } else { - h32 = seed + PRIME32_5; - } - - h32 += (U32)len; - - return XXH32_finalize(h32, p, len&15, endian, align); -} - - -XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed) -{ -#if 0 - /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ - XXH32_state_t state; - XXH32_reset(&state, 
seed); - XXH32_update(&state, input, len); - return XXH32_digest(&state); -#else - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if (XXH_FORCE_ALIGN_CHECK) { - if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); - else - return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); - } } - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); - else - return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); -#endif -} - - - -/*====== Hash streaming ======*/ - -XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) -{ - return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); -} -XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) -{ - XXH_free(statePtr); - return XXH_OK; -} - -XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) -{ - memcpy(dstState, srcState, sizeof(*dstState)); -} - -XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed) -{ - XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ - memset(&state, 0, sizeof(state)); - state.v1 = seed + PRIME32_1 + PRIME32_2; - state.v2 = seed + PRIME32_2; - state.v3 = seed + 0; - state.v4 = seed - PRIME32_1; - /* do not write into reserved, planned to be removed in a future version */ - memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); - return XXH_OK; -} - - -FORCE_INLINE XXH_errorcode -XXH32_update_endian(XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) -{ - if (input==NULL) -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) - return XXH_OK; -#else - return XXH_ERROR; -#endif - - { const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - - state->total_len_32 += (unsigned)len; - state->large_len |= (len>=16) | (state->total_len_32>=16); - - if (state->memsize + len < 16) { /* fill in tmp buffer */ - XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); - state->memsize += (unsigned)len; - return XXH_OK; - } - - if (state->memsize) { /* some data left from previous update */ - XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); - { const U32* p32 = state->mem32; - state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; - state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; - state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; - state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); - } - p += 16-state->memsize; - state->memsize = 0; - } - - if (p <= bEnd-16) { - const BYTE* const limit = bEnd - 16; - U32 v1 = state->v1; - U32 v2 = state->v2; - U32 v3 = state->v3; - U32 v4 = state->v4; - - do { - v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; - v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; - v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; - v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; - } while (p<=limit); - - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; - } - - if (p < bEnd) { - XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); - } - } - - return XXH_OK; -} - - -XXH_PUBLIC_API XXH_errorcode XXH32_update 
(XXH32_state_t* state_in, const void* input, size_t len) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_update_endian(state_in, input, len, XXH_littleEndian); - else - return XXH32_update_endian(state_in, input, len, XXH_bigEndian); -} - - -FORCE_INLINE U32 -XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) -{ - U32 h32; - - if (state->large_len) { - h32 = XXH_rotl32(state->v1, 1) - + XXH_rotl32(state->v2, 7) - + XXH_rotl32(state->v3, 12) - + XXH_rotl32(state->v4, 18); - } else { - h32 = state->v3 /* == seed */ + PRIME32_5; - } - - h32 += state->total_len_32; - - return XXH32_finalize(h32, state->mem32, state->memsize, endian, XXH_aligned); -} - - -XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_digest_endian(state_in, XXH_littleEndian); - else - return XXH32_digest_endian(state_in, XXH_bigEndian); -} - - -/*====== Canonical representation ======*/ - -/*! Default XXH result types are basic unsigned 32 and 64 bits. -* The canonical representation follows human-readable write convention, aka big-endian (large digits first). -* These functions allow transformation of hash result into and from its canonical format. -* This way, hash values can be written into a file or buffer, remaining comparable across different systems. -*/ - -XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); - memcpy(dst, &hash, sizeof(*dst)); -} - -XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) -{ - return XXH_readBE32(src); -} - - -#ifndef XXH_NO_LONG_LONG - -/* ******************************************************************* -* 64-bit hash functions -*********************************************************************/ - -/*====== Memory access ======*/ - -#ifndef MEM_MODULE -# define MEM_MODULE -# if !defined (__VMS) \ - && (defined (__cplusplus) \ - || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include <stdint.h> - typedef uint64_t U64; -# else - /* if compiler doesn't support unsigned long long, replace by another 64-bit type */ - typedef unsigned long long U64; -# endif -#endif - - -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) - -/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ -static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } - -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64; -static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; } - -#else - -/* portable and safe solution. Generally efficient. 
- * see : http://stackoverflow.com/a/32095106/646947 - */ - -static U64 XXH_read64(const void* memPtr) -{ - U64 val; - memcpy(&val, memPtr, sizeof(val)); - return val; -} - -#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ - -#if defined(_MSC_VER) /* Visual Studio */ -# define XXH_swap64 _byteswap_uint64 -#elif XXH_GCC_VERSION >= 403 -# define XXH_swap64 __builtin_bswap64 -#else -static U64 XXH_swap64 (U64 x) -{ - return ((x << 56) & 0xff00000000000000ULL) | - ((x << 40) & 0x00ff000000000000ULL) | - ((x << 24) & 0x0000ff0000000000ULL) | - ((x << 8) & 0x000000ff00000000ULL) | - ((x >> 8) & 0x00000000ff000000ULL) | - ((x >> 24) & 0x0000000000ff0000ULL) | - ((x >> 40) & 0x000000000000ff00ULL) | - ((x >> 56) & 0x00000000000000ffULL); -} -#endif - -FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) -{ - if (align==XXH_unaligned) - return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); - else - return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); -} - -FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) -{ - return XXH_readLE64_align(ptr, endian, XXH_unaligned); -} - -static U64 XXH_readBE64(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); -} - - -/*====== xxh64 ======*/ - -static const U64 PRIME64_1 = 11400714785074694791ULL; -static const U64 PRIME64_2 = 14029467366897019727ULL; -static const U64 PRIME64_3 = 1609587929392839161ULL; -static const U64 PRIME64_4 = 9650029242287828579ULL; -static const U64 PRIME64_5 = 2870177450012600261ULL; - -static U64 XXH64_round(U64 acc, U64 input) -{ - acc += input * PRIME64_2; - acc = XXH_rotl64(acc, 31); - acc *= PRIME64_1; - return acc; -} - -static U64 XXH64_mergeRound(U64 acc, U64 val) -{ - val = XXH64_round(0, val); - acc ^= val; - acc = acc * PRIME64_1 + PRIME64_4; - return acc; -} - -static U64 XXH64_avalanche(U64 h64) -{ - h64 ^= h64 >> 33; - h64 *= PRIME64_2; - h64 ^= h64 >> 29; - h64 *= PRIME64_3; - h64 ^= h64 >> 32; - return h64; -} - - -#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) - -static U64 -XXH64_finalize(U64 h64, const void* ptr, size_t len, - XXH_endianess endian, XXH_alignment align) -{ - const BYTE* p = (const BYTE*)ptr; - -#define PROCESS1_64 \ - h64 ^= (*p++) * PRIME64_5; \ - h64 = XXH_rotl64(h64, 11) * PRIME64_1; - -#define PROCESS4_64 \ - h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \ - p+=4; \ - h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; - -#define PROCESS8_64 { \ - U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \ - p+=8; \ - h64 ^= k1; \ - h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \ -} - - switch(len&31) { - case 24: PROCESS8_64; - /* fallthrough */ - case 16: PROCESS8_64; - /* fallthrough */ - case 8: PROCESS8_64; - return XXH64_avalanche(h64); - - case 28: PROCESS8_64; - /* fallthrough */ - case 20: PROCESS8_64; - /* fallthrough */ - case 12: PROCESS8_64; - /* fallthrough */ - case 4: PROCESS4_64; - return XXH64_avalanche(h64); - - case 25: PROCESS8_64; - /* fallthrough */ - case 17: PROCESS8_64; - /* fallthrough */ - case 9: PROCESS8_64; - PROCESS1_64; - return XXH64_avalanche(h64); - - case 29: PROCESS8_64; - /* fallthrough */ - case 21: PROCESS8_64; - /* fallthrough */ - case 13: PROCESS8_64; - /* fallthrough */ - case 5: PROCESS4_64; - PROCESS1_64; - return XXH64_avalanche(h64); - - case 26: PROCESS8_64; - /* fallthrough */ - case 18: PROCESS8_64; - /* fallthrough */ - case 10: PROCESS8_64; - PROCESS1_64; - PROCESS1_64; - return 
XXH64_avalanche(h64); - - case 30: PROCESS8_64; - /* fallthrough */ - case 22: PROCESS8_64; - /* fallthrough */ - case 14: PROCESS8_64; - /* fallthrough */ - case 6: PROCESS4_64; - PROCESS1_64; - PROCESS1_64; - return XXH64_avalanche(h64); - - case 27: PROCESS8_64; - /* fallthrough */ - case 19: PROCESS8_64; - /* fallthrough */ - case 11: PROCESS8_64; - PROCESS1_64; - PROCESS1_64; - PROCESS1_64; - return XXH64_avalanche(h64); - - case 31: PROCESS8_64; - /* fallthrough */ - case 23: PROCESS8_64; - /* fallthrough */ - case 15: PROCESS8_64; - /* fallthrough */ - case 7: PROCESS4_64; - /* fallthrough */ - case 3: PROCESS1_64; - /* fallthrough */ - case 2: PROCESS1_64; - /* fallthrough */ - case 1: PROCESS1_64; - /* fallthrough */ - case 0: return XXH64_avalanche(h64); - } - - /* impossible to reach */ - assert(0); - return 0; /* unreachable, but some compilers complain without it */ -} - -FORCE_INLINE U64 -XXH64_endian_align(const void* input, size_t len, U64 seed, - XXH_endianess endian, XXH_alignment align) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* bEnd = p + len; - U64 h64; - -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) - if (p==NULL) { - len=0; - bEnd=p=(const BYTE*)(size_t)32; - } -#endif - - if (len>=32) { - const BYTE* const limit = bEnd - 32; - U64 v1 = seed + PRIME64_1 + PRIME64_2; - U64 v2 = seed + PRIME64_2; - U64 v3 = seed + 0; - U64 v4 = seed - PRIME64_1; - - do { - v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; - v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; - v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; - v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; - } while (p<=limit); - - h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); - h64 = XXH64_mergeRound(h64, v1); - h64 = XXH64_mergeRound(h64, v2); - h64 = XXH64_mergeRound(h64, v3); - h64 = XXH64_mergeRound(h64, v4); - - } else { - h64 = seed + PRIME64_5; - } - - h64 += (U64) len; - - return XXH64_finalize(h64, p, len, endian, align); -} - - -XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) -{ -#if 0 - /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ - XXH64_state_t state; - XXH64_reset(&state, seed); - XXH64_update(&state, input, len); - return XXH64_digest(&state); -#else - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if (XXH_FORCE_ALIGN_CHECK) { - if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); - else - return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); - } } - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); - else - return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); -#endif -} - -/*====== Hash Streaming ======*/ - -XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) -{ - return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); -} -XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) -{ - XXH_free(statePtr); - return XXH_OK; -} - -XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) -{ - memcpy(dstState, srcState, sizeof(*dstState)); -} - -XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) -{ - 
XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ - memset(&state, 0, sizeof(state)); - state.v1 = seed + PRIME64_1 + PRIME64_2; - state.v2 = seed + PRIME64_2; - state.v3 = seed + 0; - state.v4 = seed - PRIME64_1; - /* do not write into reserved, planned to be removed in a future version */ - memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); - return XXH_OK; -} - -FORCE_INLINE XXH_errorcode -XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) -{ - if (input==NULL) -#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) - return XXH_OK; -#else - return XXH_ERROR; -#endif - - { const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - - state->total_len += len; - - if (state->memsize + len < 32) { /* fill in tmp buffer */ - XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); - state->memsize += (U32)len; - return XXH_OK; - } - - if (state->memsize) { /* tmp buffer is full */ - XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); - state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); - state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); - state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); - state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); - p += 32-state->memsize; - state->memsize = 0; - } - - if (p+32 <= bEnd) { - const BYTE* const limit = bEnd - 32; - U64 v1 = state->v1; - U64 v2 = state->v2; - U64 v3 = state->v3; - U64 v4 = state->v4; - - do { - v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; - v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; - v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; - v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; - } while (p<=limit); - - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; - } - - if (p < bEnd) { - XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); - } - } - - return XXH_OK; -} - -XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_update_endian(state_in, input, len, XXH_littleEndian); - else - return XXH64_update_endian(state_in, input, len, XXH_bigEndian); -} - -FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) -{ - U64 h64; - - if (state->total_len >= 32) { - U64 const v1 = state->v1; - U64 const v2 = state->v2; - U64 const v3 = state->v3; - U64 const v4 = state->v4; - - h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); - h64 = XXH64_mergeRound(h64, v1); - h64 = XXH64_mergeRound(h64, v2); - h64 = XXH64_mergeRound(h64, v3); - h64 = XXH64_mergeRound(h64, v4); - } else { - h64 = state->v3 /*seed*/ + PRIME64_5; - } - - h64 += (U64) state->total_len; - - return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, endian, XXH_aligned); -} - -XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_digest_endian(state_in, XXH_littleEndian); - else - return XXH64_digest_endian(state_in, XXH_bigEndian); -} - - -/*====== Canonical 
representation ======*/ - -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); - memcpy(dst, &hash, sizeof(*dst)); -} - -XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) -{ - return XXH_readBE64(src); -} - -#endif /* XXH_NO_LONG_LONG */ diff --git a/librocksdb-sys/lz4/lib/xxhash.h b/librocksdb-sys/lz4/lib/xxhash.h deleted file mode 100644 index d6bad94..0000000 --- a/librocksdb-sys/lz4/lib/xxhash.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - xxHash - Extremely Fast Hash algorithm - Header File - Copyright (C) 2012-2016, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - xxHash source repository : https://github.com/Cyan4973/xxHash -*/ - -/* Notice extracted from xxHash homepage : - -xxHash is an extremely fast Hash algorithm, running at RAM speed limits. -It also successfully passes all tests from the SMHasher suite. - -Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) - -Name Speed Q.Score Author -xxHash 5.4 GB/s 10 -CrapWow 3.2 GB/s 2 Andrew -MumurHash 3a 2.7 GB/s 10 Austin Appleby -SpookyHash 2.0 GB/s 10 Bob Jenkins -SBox 1.4 GB/s 9 Bret Mulvey -Lookup3 1.2 GB/s 9 Bob Jenkins -SuperFastHash 1.2 GB/s 1 Paul Hsieh -CityHash64 1.05 GB/s 10 Pike & Alakuijala -FNV 0.55 GB/s 5 Fowler, Noll, Vo -CRC32 0.43 GB/s 9 -MD5-32 0.33 GB/s 10 Ronald L. Rivest -SHA1-32 0.28 GB/s 10 - -Q.Score is a measure of quality of the hash function. -It depends on successfully passing SMHasher test set. -10 is a perfect score. - -A 64-bit version, named XXH64, is available since r35. -It offers much better speed, but for 64-bit applications only. 
-Name Speed on 64 bits Speed on 32 bits -XXH64 13.8 GB/s 1.9 GB/s -XXH32 6.8 GB/s 6.0 GB/s -*/ - -#ifndef XXHASH_H_5627135585666179 -#define XXHASH_H_5627135585666179 1 - -#if defined (__cplusplus) -extern "C" { -#endif - - -/* **************************** -* Definitions -******************************/ -#include /* size_t */ -typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; - - -/* **************************** - * API modifier - ******************************/ -/** XXH_INLINE_ALL (and XXH_PRIVATE_API) - * This is useful to include xxhash functions in `static` mode - * in order to inline them, and remove their symbol from the public list. - * Inlining can offer dramatic performance improvement on small keys. - * Methodology : - * #define XXH_INLINE_ALL - * #include "xxhash.h" - * `xxhash.c` is automatically included. - * It's not useful to compile and link it as a separate module. - */ -#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) -# ifndef XXH_STATIC_LINKING_ONLY -# define XXH_STATIC_LINKING_ONLY -# endif -# if defined(__GNUC__) -# define XXH_PUBLIC_API static __inline __attribute__((unused)) -# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define XXH_PUBLIC_API static inline -# elif defined(_MSC_VER) -# define XXH_PUBLIC_API static __inline -# else - /* this version may generate warnings for unused static functions */ -# define XXH_PUBLIC_API static -# endif -#else -# define XXH_PUBLIC_API /* do nothing */ -#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ - -/*! XXH_NAMESPACE, aka Namespace Emulation : - * - * If you want to include _and expose_ xxHash functions from within your own library, - * but also want to avoid symbol collisions with other libraries which may also include xxHash, - * - * you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library - * with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values). - * - * Note that no change is required within the calling program as long as it includes `xxhash.h` : - * regular symbol name will be automatically translated by this header. 
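
A short sketch of the inclusion pattern the XXH_INLINE_ALL comment above describes, assuming a hypothetical translation unit sitting next to xxhash.h/xxhash.c; the define must precede the include so the functions become file-local and can be inlined.

/* my_hashing.c -- hypothetical translation unit */
#define XXH_INLINE_ALL              /* must appear before the include */
#include <stddef.h>
#include <stdio.h>
#include "xxhash.h"                 /* pulls in xxhash.c automatically in this mode */

static unsigned hash_block(const void* p, size_t n)
{
    /* XXH32 is now a static (inlinable) function private to this file,
     * so no separate xxhash object needs to be linked. */
    return XXH32(p, n, 0);
}

int main(void)
{
    printf("%08x\n", hash_block("abc", 3));
    return 0;
}
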
- */ -#ifdef XXH_NAMESPACE -# define XXH_CAT(A,B) A##B -# define XXH_NAME2(A,B) XXH_CAT(A,B) -# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) -# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) -# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) -# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) -# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) -# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) -# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) -# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) -# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) -# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) -# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) -# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) -# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) -# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) -# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) -# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) -# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) -# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) -# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) -#endif - - -/* ************************************* -* Version -***************************************/ -#define XXH_VERSION_MAJOR 0 -#define XXH_VERSION_MINOR 6 -#define XXH_VERSION_RELEASE 5 -#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) -XXH_PUBLIC_API unsigned XXH_versionNumber (void); - - -/*-********************************************************************** -* 32-bit hash -************************************************************************/ -typedef unsigned int XXH32_hash_t; - -/*! XXH32() : - Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input". - The memory between input & input+length must be valid (allocated and read-accessible). - "seed" can be used to alter the result predictably. - Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */ -XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed); - -/*====== Streaming ======*/ -typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ -XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); -XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); -XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); - -XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed); -XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); -XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); - -/* - * Streaming functions generate the xxHash of an input provided in multiple segments. - * Note that, for small input, they are slower than single-call functions, due to state management. - * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. - * - * XXH state must first be allocated, using XXH*_createState() . - * - * Start a new hash by initializing state with a seed, using XXH*_reset(). - * - * Then, feed the hash state by calling XXH*_update() as many times as necessary. 
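
To make the namespace-emulation macros above concrete, a hedged sketch of a consumer built with -DXXH_NAMESPACE=LZ4_ (the prefix this lz4 tree itself uses in its fuzzer Makefile); the calling source is unchanged, only the linker-visible symbol names move.

/* Hypothetical consumer, compiled with: cc -DXXH_NAMESPACE=LZ4_ -c consumer.c */
#include <string.h>
#include "xxhash.h"

unsigned checksum(const char* s)
{
    /* The source still says XXH32(); the macros above rewrite this call to
     * LZ4_XXH32() at preprocessing time, so the exported symbol cannot
     * collide with another copy of xxhash linked into the same binary. */
    return XXH32(s, strlen(s), 0);
}

The prefix is applied purely by the preprocessor, which is why no change is needed in calling code as long as it goes through this header.
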
- * The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. - * - * Finally, a hash value can be produced anytime, by using XXH*_digest(). - * This function returns the nn-bits hash as an int or long long. - * - * It's still possible to continue inserting input into the hash state after a digest, - * and generate some new hashes later on, by calling again XXH*_digest(). - * - * When done, free XXH state space if it was allocated dynamically. - */ - -/*====== Canonical representation ======*/ - -typedef struct { unsigned char digest[4]; } XXH32_canonical_t; -XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); -XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); - -/* Default result type for XXH functions are primitive unsigned 32 and 64 bits. - * The canonical representation uses human-readable write convention, aka big-endian (large digits first). - * These functions allow transformation of hash result into and from its canonical format. - * This way, hash values can be written into a file / memory, and remain comparable on different systems and programs. - */ - - -#ifndef XXH_NO_LONG_LONG -/*-********************************************************************** -* 64-bit hash -************************************************************************/ -typedef unsigned long long XXH64_hash_t; - -/*! XXH64() : - Calculate the 64-bit hash of sequence of length "len" stored at memory address "input". - "seed" can be used to alter the result predictably. - This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark). -*/ -XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); - -/*====== Streaming ======*/ -typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ -XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); -XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); -XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state); - -XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); -XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); -XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); - -/*====== Canonical representation ======*/ -typedef struct { unsigned char digest[8]; } XXH64_canonical_t; -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); -XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); -#endif /* XXH_NO_LONG_LONG */ - - - -#ifdef XXH_STATIC_LINKING_ONLY - -/* ================================================================================================ - This section contains declarations which are not guaranteed to remain stable. - They may change in future versions, becoming incompatible with a different version of the library. - These declarations should only be used with static linking. - Never use them in association with dynamic linking ! -=================================================================================================== */ - -/* These definitions are only present to allow - * static allocation of XXH state, on stack or in a struct for example. - * Never **ever** use members directly. 
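
A minimal usage sketch of the streaming workflow documented above: allocate a state, reset it with a seed, feed chunks, then digest; the chunk boundaries here are illustrative only.

#include <stdio.h>
#include <string.h>
#include "xxhash.h"

int main(void)
{
    static const char* const chunks[] = { "stream", "ing ", "xxhash" };
    XXH64_state_t* const st = XXH64_createState();
    if (st == NULL) return 1;

    XXH64_reset(st, 0);                         /* start a new hash, seed 0 */
    for (size_t i = 0; i < sizeof(chunks)/sizeof(chunks[0]); i++)
        XXH64_update(st, chunks[i], strlen(chunks[i]));

    unsigned long long const h = XXH64_digest(st);  /* may be called at any point */
    printf("%016llx\n", h);

    XXH64_freeState(st);                        /* state was allocated dynamically */
    return 0;
}
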
*/ - -#if !defined (__VMS) \ - && (defined (__cplusplus) \ - || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include - -struct XXH32_state_s { - uint32_t total_len_32; - uint32_t large_len; - uint32_t v1; - uint32_t v2; - uint32_t v3; - uint32_t v4; - uint32_t mem32[4]; - uint32_t memsize; - uint32_t reserved; /* never read nor write, might be removed in a future version */ -}; /* typedef'd to XXH32_state_t */ - -struct XXH64_state_s { - uint64_t total_len; - uint64_t v1; - uint64_t v2; - uint64_t v3; - uint64_t v4; - uint64_t mem64[4]; - uint32_t memsize; - uint32_t reserved[2]; /* never read nor write, might be removed in a future version */ -}; /* typedef'd to XXH64_state_t */ - -# else - -struct XXH32_state_s { - unsigned total_len_32; - unsigned large_len; - unsigned v1; - unsigned v2; - unsigned v3; - unsigned v4; - unsigned mem32[4]; - unsigned memsize; - unsigned reserved; /* never read nor write, might be removed in a future version */ -}; /* typedef'd to XXH32_state_t */ - -# ifndef XXH_NO_LONG_LONG /* remove 64-bit support */ -struct XXH64_state_s { - unsigned long long total_len; - unsigned long long v1; - unsigned long long v2; - unsigned long long v3; - unsigned long long v4; - unsigned long long mem64[4]; - unsigned memsize; - unsigned reserved[2]; /* never read nor write, might be removed in a future version */ -}; /* typedef'd to XXH64_state_t */ -# endif - -# endif - - -#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) -# include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */ -#endif - -#endif /* XXH_STATIC_LINKING_ONLY */ - - -#if defined (__cplusplus) -} -#endif - -#endif /* XXHASH_H_5627135585666179 */ diff --git a/librocksdb-sys/lz4/ossfuzz/.gitignore b/librocksdb-sys/lz4/ossfuzz/.gitignore deleted file mode 100644 index 0ef0d2b..0000000 --- a/librocksdb-sys/lz4/ossfuzz/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ - -# build artefacts -round_trip_frame_uncompressed_fuzzer - -# test artefacts - -# local tests - diff --git a/librocksdb-sys/lz4/ossfuzz/Makefile b/librocksdb-sys/lz4/ossfuzz/Makefile deleted file mode 100644 index deb2938..0000000 --- a/librocksdb-sys/lz4/ossfuzz/Makefile +++ /dev/null @@ -1,79 +0,0 @@ -# ########################################################################## -# LZ4 oss fuzzer - Makefile -# -# GPL v2 License -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-# -# You can contact the author at : -# - LZ4 homepage : http://www.lz4.org -# - LZ4 source repository : https://github.com/lz4/lz4 -# ########################################################################## -# compress_fuzzer : OSS Fuzz test tool -# decompress_fuzzer : OSS Fuzz test tool -# ########################################################################## - -LZ4DIR := ../lib -LIB_FUZZING_ENGINE ?= - -DEBUGLEVEL?= 1 -DEBUGFLAGS = -g -DLZ4_DEBUG=$(DEBUGLEVEL) - -LZ4_CFLAGS = $(CFLAGS) $(DEBUGFLAGS) $(MOREFLAGS) -LZ4_CXXFLAGS = $(CXXFLAGS) $(DEBUGFLAGS) $(MOREFLAGS) -LZ4_CPPFLAGS = $(CPPFLAGS) -I$(LZ4DIR) -DXXH_NAMESPACE=LZ4_ \ - -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - -FUZZERS := \ - compress_fuzzer \ - decompress_fuzzer \ - round_trip_fuzzer \ - round_trip_stream_fuzzer \ - compress_hc_fuzzer \ - round_trip_hc_fuzzer \ - compress_frame_fuzzer \ - round_trip_frame_fuzzer \ - round_trip_frame_uncompressed_fuzzer \ - decompress_frame_fuzzer - -.PHONY: all -all: $(FUZZERS) - -# Include a rule to build the static library if calling this target -# directly. -$(LZ4DIR)/liblz4.a: - $(MAKE) -C $(LZ4DIR) CFLAGS="$(LZ4_CFLAGS)" liblz4.a - -%.o: %.c - $(CC) -c $(LZ4_CFLAGS) $(LZ4_CPPFLAGS) $< -o $@ - -# Generic rule for generating fuzzers -ifeq ($(LIB_FUZZING_ENGINE),) - LIB_FUZZING_DEPS := standaloneengine.o -else - LIB_FUZZING_DEPS := -endif -%_fuzzer: %_fuzzer.o lz4_helpers.o fuzz_data_producer.o $(LZ4DIR)/liblz4.a $(LIB_FUZZING_DEPS) - $(CXX) $(LZ4_CXXFLAGS) $(LZ4_CPPFLAGS) $(LDFLAGS) $(LIB_FUZZING_ENGINE) $^ -o $@$(EXT) - -%_fuzzer_clean: - $(RM) $*_fuzzer $*_fuzzer.o standaloneengine.o - -.PHONY: clean -clean: compress_fuzzer_clean decompress_fuzzer_clean \ - compress_frame_fuzzer_clean compress_hc_fuzzer_clean \ - decompress_frame_fuzzer_clean round_trip_frame_fuzzer_clean \ - round_trip_fuzzer_clean round_trip_hc_fuzzer_clean round_trip_stream_fuzzer_clean - $(MAKE) -C $(LZ4DIR) clean diff --git a/librocksdb-sys/lz4/ossfuzz/compress_frame_fuzzer.c b/librocksdb-sys/lz4/ossfuzz/compress_frame_fuzzer.c deleted file mode 100644 index 568ae14..0000000 --- a/librocksdb-sys/lz4/ossfuzz/compress_frame_fuzzer.c +++ /dev/null @@ -1,48 +0,0 @@ -/** - * This fuzz target attempts to compress the fuzzed data with the simple - * compression function with an output buffer that may be too small to - * ensure that the compressor never crashes. - */ - -#include -#include -#include -#include - -#include "fuzz_helpers.h" -#include "lz4.h" -#include "lz4frame.h" -#include "lz4_helpers.h" -#include "fuzz_data_producer.h" - -int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) -{ - FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(data, size); - LZ4F_preferences_t const prefs = FUZZ_dataProducer_preferences(producer); - size_t const dstCapacitySeed = FUZZ_dataProducer_retrieve32(producer); - size = FUZZ_dataProducer_remainingBytes(producer); - - size_t const compressBound = LZ4F_compressFrameBound(size, &prefs); - size_t const dstCapacity = FUZZ_getRange_from_uint32(dstCapacitySeed, 0, compressBound); - - char* const dst = (char*)malloc(dstCapacity); - char* const rt = (char*)malloc(size); - - FUZZ_ASSERT(dst!=NULL); - FUZZ_ASSERT(rt!=NULL); - - /* If compression succeeds it must round trip correctly. 
*/ - size_t const dstSize = - LZ4F_compressFrame(dst, dstCapacity, data, size, &prefs); - if (!LZ4F_isError(dstSize)) { - size_t const rtSize = FUZZ_decompressFrame(rt, size, dst, dstSize); - FUZZ_ASSERT_MSG(rtSize == size, "Incorrect regenerated size"); - FUZZ_ASSERT_MSG(!memcmp(data, rt, size), "Corruption!"); - } - - free(dst); - free(rt); - FUZZ_dataProducer_free(producer); - - return 0; -} diff --git a/librocksdb-sys/lz4/ossfuzz/compress_fuzzer.c b/librocksdb-sys/lz4/ossfuzz/compress_fuzzer.c deleted file mode 100644 index edc8aad..0000000 --- a/librocksdb-sys/lz4/ossfuzz/compress_fuzzer.c +++ /dev/null @@ -1,58 +0,0 @@ -/** - * This fuzz target attempts to compress the fuzzed data with the simple - * compression function with an output buffer that may be too small to - * ensure that the compressor never crashes. - */ - -#include -#include -#include -#include - -#include "fuzz_helpers.h" -#include "fuzz_data_producer.h" -#include "lz4.h" - -int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) -{ - FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(data, size); - size_t const dstCapacitySeed = FUZZ_dataProducer_retrieve32(producer); - size = FUZZ_dataProducer_remainingBytes(producer); - - size_t const compressBound = LZ4_compressBound(size); - size_t const dstCapacity = FUZZ_getRange_from_uint32(dstCapacitySeed, 0, compressBound); - - char* const dst = (char*)malloc(dstCapacity); - char* const rt = (char*)malloc(size); - - FUZZ_ASSERT(dst); - FUZZ_ASSERT(rt); - - /* If compression succeeds it must round trip correctly. */ - { - int const dstSize = LZ4_compress_default((const char*)data, dst, - size, dstCapacity); - if (dstSize > 0) { - int const rtSize = LZ4_decompress_safe(dst, rt, dstSize, size); - FUZZ_ASSERT_MSG(rtSize == size, "Incorrect regenerated size"); - FUZZ_ASSERT_MSG(!memcmp(data, rt, size), "Corruption!"); - } - } - - if (dstCapacity > 0) { - /* Compression succeeds and must round trip correctly. */ - int compressedSize = size; - int const dstSize = LZ4_compress_destSize((const char*)data, dst, - &compressedSize, dstCapacity); - FUZZ_ASSERT(dstSize > 0); - int const rtSize = LZ4_decompress_safe(dst, rt, dstSize, size); - FUZZ_ASSERT_MSG(rtSize == compressedSize, "Incorrect regenerated size"); - FUZZ_ASSERT_MSG(!memcmp(data, rt, compressedSize), "Corruption!"); - } - - free(dst); - free(rt); - FUZZ_dataProducer_free(producer); - - return 0; -} diff --git a/librocksdb-sys/lz4/ossfuzz/compress_hc_fuzzer.c b/librocksdb-sys/lz4/ossfuzz/compress_hc_fuzzer.c deleted file mode 100644 index 7d8e45a..0000000 --- a/librocksdb-sys/lz4/ossfuzz/compress_hc_fuzzer.c +++ /dev/null @@ -1,64 +0,0 @@ -/** - * This fuzz target attempts to compress the fuzzed data with the simple - * compression function with an output buffer that may be too small to - * ensure that the compressor never crashes. 
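
The fuzzer above intentionally passes destination capacities that may be too small; outside of fuzzing, the usual pattern is to size the buffer with LZ4_compressBound(), in which case LZ4_compress_default() is expected to succeed. A hedged sketch of that ordinary round trip:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "lz4.h"

int main(void)
{
    const char src[] = "example payload, repeated payload, repeated payload";
    int const srcSize = (int)strlen(src);

    int const bound = LZ4_compressBound(srcSize);    /* worst-case output size */
    char* const dst = (char*)malloc((size_t)bound);
    char* const rt  = (char*)malloc((size_t)srcSize);
    if (dst == NULL || rt == NULL) return 1;

    int const cSize = LZ4_compress_default(src, dst, srcSize, bound);
    if (cSize <= 0) return 1;                        /* 0 means compression failed */

    int const rtSize = LZ4_decompress_safe(dst, rt, cSize, srcSize);
    printf("ok = %d\n", rtSize == srcSize && memcmp(src, rt, (size_t)srcSize) == 0);

    free(dst);
    free(rt);
    return 0;
}
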
- */ - -#include -#include -#include -#include - -#include "fuzz_helpers.h" -#include "fuzz_data_producer.h" -#include "lz4.h" -#include "lz4hc.h" - -int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) -{ - FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(data, size); - size_t const dstCapacitySeed = FUZZ_dataProducer_retrieve32(producer); - size_t const levelSeed = FUZZ_dataProducer_retrieve32(producer); - size = FUZZ_dataProducer_remainingBytes(producer); - - size_t const dstCapacity = FUZZ_getRange_from_uint32(dstCapacitySeed, 0, size); - int const level = FUZZ_getRange_from_uint32(levelSeed, LZ4HC_CLEVEL_MIN, LZ4HC_CLEVEL_MAX); - - char* const dst = (char*)malloc(dstCapacity); - char* const rt = (char*)malloc(size); - - FUZZ_ASSERT(dst); - FUZZ_ASSERT(rt); - - /* If compression succeeds it must round trip correctly. */ - { - int const dstSize = LZ4_compress_HC((const char*)data, dst, size, - dstCapacity, level); - if (dstSize > 0) { - int const rtSize = LZ4_decompress_safe(dst, rt, dstSize, size); - FUZZ_ASSERT_MSG(rtSize == size, "Incorrect regenerated size"); - FUZZ_ASSERT_MSG(!memcmp(data, rt, size), "Corruption!"); - } - } - - if (dstCapacity > 0) { - /* Compression succeeds and must round trip correctly. */ - void* state = malloc(LZ4_sizeofStateHC()); - FUZZ_ASSERT(state); - int compressedSize = size; - int const dstSize = LZ4_compress_HC_destSize(state, (const char*)data, - dst, &compressedSize, - dstCapacity, level); - FUZZ_ASSERT(dstSize > 0); - int const rtSize = LZ4_decompress_safe(dst, rt, dstSize, size); - FUZZ_ASSERT_MSG(rtSize == compressedSize, "Incorrect regenerated size"); - FUZZ_ASSERT_MSG(!memcmp(data, rt, compressedSize), "Corruption!"); - free(state); - } - - free(dst); - free(rt); - FUZZ_dataProducer_free(producer); - - return 0; -} diff --git a/librocksdb-sys/lz4/ossfuzz/decompress_frame_fuzzer.c b/librocksdb-sys/lz4/ossfuzz/decompress_frame_fuzzer.c deleted file mode 100644 index 0fcbb16..0000000 --- a/librocksdb-sys/lz4/ossfuzz/decompress_frame_fuzzer.c +++ /dev/null @@ -1,75 +0,0 @@ -/** - * This fuzz target attempts to decompress the fuzzed data with the simple - * decompression function to ensure the decompressor never crashes. 
- */ - -#include -#include -#include -#include - -#include "fuzz_helpers.h" -#include "fuzz_data_producer.h" -#include "lz4.h" -#define LZ4F_STATIC_LINKING_ONLY -#include "lz4frame.h" -#include "lz4_helpers.h" - -static void decompress(LZ4F_dctx* dctx, void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict, size_t dictSize, - const LZ4F_decompressOptions_t* opts) -{ - LZ4F_resetDecompressionContext(dctx); - if (dictSize == 0) - LZ4F_decompress(dctx, dst, &dstCapacity, src, &srcSize, opts); - else - LZ4F_decompress_usingDict(dctx, dst, &dstCapacity, src, &srcSize, - dict, dictSize, opts); -} - -int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) -{ - FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(data, size); - size_t const dstCapacitySeed = FUZZ_dataProducer_retrieve32(producer); - size_t const dictSizeSeed = FUZZ_dataProducer_retrieve32(producer); - size = FUZZ_dataProducer_remainingBytes(producer); - - size_t const dstCapacity = FUZZ_getRange_from_uint32( - dstCapacitySeed, 0, 4 * size); - size_t const largeDictSize = 64 * 1024; - size_t const dictSize = FUZZ_getRange_from_uint32( - dictSizeSeed, 0, largeDictSize); - - char* const dst = (char*)malloc(dstCapacity); - char* const dict = (char*)malloc(dictSize); - LZ4F_decompressOptions_t opts; - LZ4F_dctx* dctx; - LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION); - - FUZZ_ASSERT(dctx); - FUZZ_ASSERT(dst); - FUZZ_ASSERT(dict); - - /* Prepare the dictionary. The data doesn't matter for decompression. */ - memset(dict, 0, dictSize); - - - /* Decompress using multiple configurations. */ - memset(&opts, 0, sizeof(opts)); - opts.stableDst = 0; - decompress(dctx, dst, dstCapacity, data, size, NULL, 0, &opts); - opts.stableDst = 1; - decompress(dctx, dst, dstCapacity, data, size, NULL, 0, &opts); - opts.stableDst = 0; - decompress(dctx, dst, dstCapacity, data, size, dict, dictSize, &opts); - opts.stableDst = 1; - decompress(dctx, dst, dstCapacity, data, size, dict, dictSize, &opts); - - LZ4F_freeDecompressionContext(dctx); - free(dst); - free(dict); - FUZZ_dataProducer_free(producer); - - return 0; -} diff --git a/librocksdb-sys/lz4/ossfuzz/decompress_fuzzer.c b/librocksdb-sys/lz4/ossfuzz/decompress_fuzzer.c deleted file mode 100644 index 490b3fd..0000000 --- a/librocksdb-sys/lz4/ossfuzz/decompress_fuzzer.c +++ /dev/null @@ -1,78 +0,0 @@ -/** - * This fuzz target attempts to decompress the fuzzed data with the simple - * decompression function to ensure the decompressor never crashes. - */ - -#include -#include -#include -#include - -#include "fuzz_helpers.h" -#include "fuzz_data_producer.h" -#include "lz4.h" - -int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) -{ - FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(data, size); - size_t const dstCapacitySeed = FUZZ_dataProducer_retrieve32(producer); - size = FUZZ_dataProducer_remainingBytes(producer); - - size_t const dstCapacity = FUZZ_getRange_from_uint32(dstCapacitySeed, 0, 4 * size); - size_t const smallDictSize = size + 1; - size_t const largeDictSize = 64 * 1024 - 1; - size_t const dictSize = MAX(smallDictSize, largeDictSize); - char* const dst = (char*)malloc(dstCapacity); - char* const dict = (char*)malloc(dictSize + size); - char* const largeDict = dict; - char* const dataAfterDict = dict + dictSize; - char* const smallDict = dataAfterDict - smallDictSize; - - FUZZ_ASSERT(dst); - FUZZ_ASSERT(dict); - - /* Prepare the dictionary. The data doesn't matter for decompression. 
*/ - memset(dict, 0, dictSize); - memcpy(dataAfterDict, data, size); - - /* Decompress using each possible dictionary configuration. */ - /* No dictionary. */ - LZ4_decompress_safe_usingDict((char const*)data, dst, size, - dstCapacity, NULL, 0); - /* Small external dictionary. */ - LZ4_decompress_safe_usingDict((char const*)data, dst, size, - dstCapacity, smallDict, smallDictSize); - /* Large external dictionary. */ - LZ4_decompress_safe_usingDict((char const*)data, dst, size, - dstCapacity, largeDict, largeDictSize); - /* Small prefix. */ - LZ4_decompress_safe_usingDict((char const*)dataAfterDict, dst, size, - dstCapacity, smallDict, smallDictSize); - /* Large prefix. */ - LZ4_decompress_safe_usingDict((char const*)dataAfterDict, dst, size, - dstCapacity, largeDict, largeDictSize); - /* Partial decompression. */ - LZ4_decompress_safe_partial((char const*)data, dst, size, - dstCapacity, dstCapacity); - /* Partial decompression using each possible dictionary configuration. */ - /* Partial decompression with no dictionary. */ - LZ4_decompress_safe_partial_usingDict((char const*)data, dst, size, - dstCapacity, dstCapacity, NULL, 0); - /* Partial decompression with small external dictionary. */ - LZ4_decompress_safe_partial_usingDict((char const*)data, dst, size, - dstCapacity, dstCapacity, smallDict, smallDictSize); - /* Partial decompression with large external dictionary. */ - LZ4_decompress_safe_partial_usingDict((char const*)data, dst, size, - dstCapacity, dstCapacity, largeDict, largeDictSize); - /* Partial decompression with small prefix. */ - LZ4_decompress_safe_partial_usingDict((char const*)dataAfterDict, dst, size, - dstCapacity, dstCapacity, smallDict, smallDictSize); - /* Partial decompression wtih large prefix. */ - LZ4_decompress_safe_partial_usingDict((char const*)dataAfterDict, dst, size, - dstCapacity, dstCapacity, largeDict, largeDictSize); - free(dst); - free(dict); - FUZZ_dataProducer_free(producer); - - return 0; -} diff --git a/librocksdb-sys/lz4/ossfuzz/fuzz.h b/librocksdb-sys/lz4/ossfuzz/fuzz.h deleted file mode 100644 index eefac63..0000000 --- a/librocksdb-sys/lz4/ossfuzz/fuzz.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Fuzz target interface. - * Fuzz targets have some common parameters passed as macros during compilation. - * Check the documentation for each individual fuzzer for more parameters. - * - * @param FUZZ_RNG_SEED_SIZE: - * The number of bytes of the source to look at when constructing a seed - * for the deterministic RNG. These bytes are discarded before passing - * the data to lz4 functions. Every fuzzer initializes the RNG exactly - * once before doing anything else, even if it is unused. - * Default: 4. - * @param LZ4_DEBUG: - * This is a parameter for the lz4 library. Defining `LZ4_DEBUG=1` - * enables assert() statements in the lz4 library. Higher levels enable - * logging, so aren't recommended. Defining `LZ4_DEBUG=1` is - * recommended. - * @param LZ4_FORCE_MEMORY_ACCESS: - * This flag controls how the zstd library accesses unaligned memory. - * It can be undefined, or 0 through 2. If it is undefined, it selects - * the method to use based on the compiler. If testing with UBSAN set - * MEM_FORCE_MEMORY_ACCESS=0 to use the standard compliant method. - * @param FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - * This is the canonical flag to enable deterministic builds for fuzzing. - * Changes to zstd for fuzzing are gated behind this define. - * It is recommended to define this when building zstd for fuzzing. 
- */ - -#ifndef FUZZ_H -#define FUZZ_H - -#ifndef FUZZ_RNG_SEED_SIZE -# define FUZZ_RNG_SEED_SIZE 4 -#endif - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -int LLVMFuzzerTestOneInput(const uint8_t *src, size_t size); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/librocksdb-sys/lz4/ossfuzz/fuzz_data_producer.c b/librocksdb-sys/lz4/ossfuzz/fuzz_data_producer.c deleted file mode 100644 index 670fbf5..0000000 --- a/librocksdb-sys/lz4/ossfuzz/fuzz_data_producer.c +++ /dev/null @@ -1,77 +0,0 @@ -#include "fuzz_data_producer.h" - -struct FUZZ_dataProducer_s{ - const uint8_t *data; - size_t size; -}; - -FUZZ_dataProducer_t* FUZZ_dataProducer_create(const uint8_t* data, size_t size) { - FUZZ_dataProducer_t* const producer = malloc(sizeof(FUZZ_dataProducer_t)); - - FUZZ_ASSERT(producer != NULL); - - producer->data = data; - producer->size = size; - return producer; -} - -void FUZZ_dataProducer_free(FUZZ_dataProducer_t *producer) { free(producer); } - -uint32_t FUZZ_dataProducer_retrieve32(FUZZ_dataProducer_t *producer) { - const uint8_t* data = producer->data; - const size_t size = producer->size; - if (size == 0) { - return 0; - } else if (size < 4) { - producer->size -= 1; - return (uint32_t)data[size - 1]; - } else { - producer->size -= 4; - return *(data + size - 4); - } -} - -uint32_t FUZZ_getRange_from_uint32(uint32_t seed, uint32_t min, uint32_t max) -{ - uint32_t range = max - min; - if (range == 0xffffffff) { - return seed; - } - return min + seed % (range + 1); -} - -uint32_t FUZZ_dataProducer_range32(FUZZ_dataProducer_t* producer, - uint32_t min, uint32_t max) -{ - size_t const seed = FUZZ_dataProducer_retrieve32(producer); - return FUZZ_getRange_from_uint32(seed, min, max); -} - -LZ4F_frameInfo_t FUZZ_dataProducer_frameInfo(FUZZ_dataProducer_t* producer) -{ - LZ4F_frameInfo_t info = LZ4F_INIT_FRAMEINFO; - info.blockSizeID = FUZZ_dataProducer_range32(producer, LZ4F_max64KB - 1, LZ4F_max4MB); - if (info.blockSizeID < LZ4F_max64KB) { - info.blockSizeID = LZ4F_default; - } - info.blockMode = FUZZ_dataProducer_range32(producer, LZ4F_blockLinked, LZ4F_blockIndependent); - info.contentChecksumFlag = FUZZ_dataProducer_range32(producer, LZ4F_noContentChecksum, - LZ4F_contentChecksumEnabled); - info.blockChecksumFlag = FUZZ_dataProducer_range32(producer, LZ4F_noBlockChecksum, - LZ4F_blockChecksumEnabled); - return info; -} - -LZ4F_preferences_t FUZZ_dataProducer_preferences(FUZZ_dataProducer_t* producer) -{ - LZ4F_preferences_t prefs = LZ4F_INIT_PREFERENCES; - prefs.frameInfo = FUZZ_dataProducer_frameInfo(producer); - prefs.compressionLevel = FUZZ_dataProducer_range32(producer, 0, LZ4HC_CLEVEL_MAX + 3) - 3; - prefs.autoFlush = FUZZ_dataProducer_range32(producer, 0, 1); - prefs.favorDecSpeed = FUZZ_dataProducer_range32(producer, 0, 1); - return prefs; -} - -size_t FUZZ_dataProducer_remainingBytes(FUZZ_dataProducer_t *producer){ - return producer->size; -} diff --git a/librocksdb-sys/lz4/ossfuzz/fuzz_data_producer.h b/librocksdb-sys/lz4/ossfuzz/fuzz_data_producer.h deleted file mode 100644 index b96dcba..0000000 --- a/librocksdb-sys/lz4/ossfuzz/fuzz_data_producer.h +++ /dev/null @@ -1,36 +0,0 @@ -#include -#include -#include -#include - -#include "fuzz_helpers.h" -#include "lz4frame.h" -#include "lz4hc.h" - -/* Struct used for maintaining the state of the data */ -typedef struct FUZZ_dataProducer_s FUZZ_dataProducer_t; - -/* Returns a data producer state struct. Use for producer initialization. 
*/ -FUZZ_dataProducer_t *FUZZ_dataProducer_create(const uint8_t *data, size_t size); - -/* Frees the data producer */ -void FUZZ_dataProducer_free(FUZZ_dataProducer_t *producer); - -/* Returns 32 bits from the end of data */ -uint32_t FUZZ_dataProducer_retrieve32(FUZZ_dataProducer_t *producer); - -/* Returns value between [min, max] */ -uint32_t FUZZ_getRange_from_uint32(uint32_t seed, uint32_t min, uint32_t max); - -/* Combination of above two functions for non adaptive use cases. ie where size is not involved */ -uint32_t FUZZ_dataProducer_range32(FUZZ_dataProducer_t *producer, uint32_t min, - uint32_t max); - -/* Returns lz4 preferences */ -LZ4F_preferences_t FUZZ_dataProducer_preferences(FUZZ_dataProducer_t* producer); - -/* Returns lz4 frame info */ -LZ4F_frameInfo_t FUZZ_dataProducer_frameInfo(FUZZ_dataProducer_t* producer); - -/* Returns the size of the remaining bytes of data in the producer */ -size_t FUZZ_dataProducer_remainingBytes(FUZZ_dataProducer_t *producer); diff --git a/librocksdb-sys/lz4/ossfuzz/fuzz_helpers.h b/librocksdb-sys/lz4/ossfuzz/fuzz_helpers.h deleted file mode 100644 index efd9acf..0000000 --- a/librocksdb-sys/lz4/ossfuzz/fuzz_helpers.h +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright (c) 2016-present, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree), - * meaning you may select, at your option, one of the above-listed licenses. - */ - -/** - * Helper functions for fuzzing. - */ - -#ifndef FUZZ_HELPERS_H -#define FUZZ_HELPERS_H - -#include "fuzz.h" -#include "xxhash.h" -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -#define LZ4_COMMONDEFS_ONLY -#ifndef LZ4_SRC_INCLUDED -#include "lz4.c" /* LZ4_count, constants, mem */ -#endif - -#define MIN(a,b) ( (a) < (b) ? (a) : (b) ) -#define MAX(a,b) ( (a) > (b) ? (a) : (b) ) - -#define FUZZ_QUOTE_IMPL(str) #str -#define FUZZ_QUOTE(str) FUZZ_QUOTE_IMPL(str) - -/** - * Asserts for fuzzing that are always enabled. - */ -#define FUZZ_ASSERT_MSG(cond, msg) \ - ((cond) ? (void)0 \ - : (fprintf(stderr, "%s: %u: Assertion: `%s' failed. %s\n", __FILE__, \ - __LINE__, FUZZ_QUOTE(cond), (msg)), \ - abort())) -#define FUZZ_ASSERT(cond) FUZZ_ASSERT_MSG((cond), ""); - -#if defined(__GNUC__) -#define FUZZ_STATIC static __inline __attribute__((unused)) -#elif defined(__cplusplus) || \ - (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -#define FUZZ_STATIC static inline -#elif defined(_MSC_VER) -#define FUZZ_STATIC static __inline -#else -#define FUZZ_STATIC static -#endif - -/** - * Deterministically constructs a seed based on the fuzz input. - * Consumes up to the first FUZZ_RNG_SEED_SIZE bytes of the input. - */ -FUZZ_STATIC uint32_t FUZZ_seed(uint8_t const **src, size_t* size) { - uint8_t const *data = *src; - size_t const toHash = MIN(FUZZ_RNG_SEED_SIZE, *size); - *size -= toHash; - *src += toHash; - return XXH32(data, toHash, 0); -} - -#define FUZZ_rotl32(x, r) (((x) << (r)) | ((x) >> (32 - (r)))) - -FUZZ_STATIC uint32_t FUZZ_rand(uint32_t *state) { - static const uint32_t prime1 = 2654435761U; - static const uint32_t prime2 = 2246822519U; - uint32_t rand32 = *state; - rand32 *= prime1; - rand32 += prime2; - rand32 = FUZZ_rotl32(rand32, 13); - *state = rand32; - return rand32 >> 5; -} - -/* Returns a random number in the range [min, max]. 
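
To show how the producer helpers declared above are meant to be wired into a target, here is a hedged skeleton of a hypothetical fuzzer (not one of the targets in this tree): it derives deterministic parameters from the tail of the input, then hands the remaining bytes to the code under test.

#include <stddef.h>
#include <stdint.h>

#include "fuzz_helpers.h"
#include "fuzz_data_producer.h"
#include "lz4.h"

int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
{
    FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(data, size);

    /* Derive an acceleration factor deterministically from the tail of the input. */
    uint32_t const accel = FUZZ_dataProducer_range32(producer, 1, 65537);

    /* Whatever was not consumed as a seed is the payload to compress. */
    size = FUZZ_dataProducer_remainingBytes(producer);

    char dst[LZ4_COMPRESSBOUND(1024)];
    if (size <= 1024) {
        (void)LZ4_compress_fast((const char*)data, dst, (int)size,
                                (int)sizeof(dst), (int)accel);
    }

    FUZZ_dataProducer_free(producer);
    return 0;
}
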
*/ -FUZZ_STATIC uint32_t FUZZ_rand32(uint32_t *state, uint32_t min, uint32_t max) { - uint32_t random = FUZZ_rand(state); - return min + (random % (max - min + 1)); -} - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/librocksdb-sys/lz4/ossfuzz/lz4_helpers.c b/librocksdb-sys/lz4/ossfuzz/lz4_helpers.c deleted file mode 100644 index 9471630..0000000 --- a/librocksdb-sys/lz4/ossfuzz/lz4_helpers.c +++ /dev/null @@ -1,51 +0,0 @@ -#include "fuzz_helpers.h" -#include "lz4_helpers.h" -#include "lz4hc.h" - -LZ4F_frameInfo_t FUZZ_randomFrameInfo(uint32_t* seed) -{ - LZ4F_frameInfo_t info = LZ4F_INIT_FRAMEINFO; - info.blockSizeID = FUZZ_rand32(seed, LZ4F_max64KB - 1, LZ4F_max4MB); - if (info.blockSizeID < LZ4F_max64KB) { - info.blockSizeID = LZ4F_default; - } - info.blockMode = FUZZ_rand32(seed, LZ4F_blockLinked, LZ4F_blockIndependent); - info.contentChecksumFlag = FUZZ_rand32(seed, LZ4F_noContentChecksum, - LZ4F_contentChecksumEnabled); - info.blockChecksumFlag = FUZZ_rand32(seed, LZ4F_noBlockChecksum, - LZ4F_blockChecksumEnabled); - return info; -} - -LZ4F_preferences_t FUZZ_randomPreferences(uint32_t* seed) -{ - LZ4F_preferences_t prefs = LZ4F_INIT_PREFERENCES; - prefs.frameInfo = FUZZ_randomFrameInfo(seed); - prefs.compressionLevel = FUZZ_rand32(seed, 0, LZ4HC_CLEVEL_MAX + 3) - 3; - prefs.autoFlush = FUZZ_rand32(seed, 0, 1); - prefs.favorDecSpeed = FUZZ_rand32(seed, 0, 1); - return prefs; -} - -size_t FUZZ_decompressFrame(void* dst, const size_t dstCapacity, - const void* src, const size_t srcSize) -{ - LZ4F_decompressOptions_t opts; - memset(&opts, 0, sizeof(opts)); - opts.stableDst = 1; - LZ4F_dctx* dctx; - LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION); - FUZZ_ASSERT(dctx); - - size_t dstSize = dstCapacity; - size_t srcConsumed = srcSize; - size_t const rc = - LZ4F_decompress(dctx, dst, &dstSize, src, &srcConsumed, &opts); - FUZZ_ASSERT(!LZ4F_isError(rc)); - FUZZ_ASSERT(rc == 0); - FUZZ_ASSERT(srcConsumed == srcSize); - - LZ4F_freeDecompressionContext(dctx); - - return dstSize; -} diff --git a/librocksdb-sys/lz4/ossfuzz/lz4_helpers.h b/librocksdb-sys/lz4/ossfuzz/lz4_helpers.h deleted file mode 100644 index c99fb01..0000000 --- a/librocksdb-sys/lz4/ossfuzz/lz4_helpers.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef LZ4_HELPERS -#define LZ4_HELPERS - -#include "lz4frame.h" - -LZ4F_frameInfo_t FUZZ_randomFrameInfo(uint32_t* seed); - -LZ4F_preferences_t FUZZ_randomPreferences(uint32_t* seed); - -size_t FUZZ_decompressFrame(void* dst, const size_t dstCapacity, - const void* src, const size_t srcSize); - -#endif /* LZ4_HELPERS */ diff --git a/librocksdb-sys/lz4/ossfuzz/ossfuzz.sh b/librocksdb-sys/lz4/ossfuzz/ossfuzz.sh deleted file mode 100755 index 9782286..0000000 --- a/librocksdb-sys/lz4/ossfuzz/ossfuzz.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -eu - -# This script is called by the oss-fuzz main project when compiling the fuzz -# targets. This script is regression tested by travisoss.sh. - -# Save off the current folder as the build root. -export BUILD_ROOT=$PWD - -echo "CC: $CC" -echo "CXX: $CXX" -echo "LIB_FUZZING_ENGINE: $LIB_FUZZING_ENGINE" -echo "CFLAGS: $CFLAGS" -echo "CXXFLAGS: $CXXFLAGS" -echo "OUT: $OUT" - -export MAKEFLAGS+="-j$(nproc)" - -pushd ossfuzz -make V=1 all -popd - -# Copy the fuzzers to the target directory. 
-cp -v ossfuzz/*_fuzzer $OUT/ diff --git a/librocksdb-sys/lz4/ossfuzz/round_trip_frame_fuzzer.c b/librocksdb-sys/lz4/ossfuzz/round_trip_frame_fuzzer.c deleted file mode 100644 index 149542d..0000000 --- a/librocksdb-sys/lz4/ossfuzz/round_trip_frame_fuzzer.c +++ /dev/null @@ -1,43 +0,0 @@ -/** - * This fuzz target performs a lz4 round-trip test (compress & decompress), - * compares the result with the original, and calls abort() on corruption. - */ - -#include -#include -#include -#include - -#include "fuzz_helpers.h" -#include "lz4.h" -#include "lz4frame.h" -#include "lz4_helpers.h" -#include "fuzz_data_producer.h" - -int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) -{ - FUZZ_dataProducer_t* producer = FUZZ_dataProducer_create(data, size); - LZ4F_preferences_t const prefs = FUZZ_dataProducer_preferences(producer); - size = FUZZ_dataProducer_remainingBytes(producer); - - size_t const dstCapacity = LZ4F_compressFrameBound(LZ4_compressBound(size), &prefs); - char* const dst = (char*)malloc(dstCapacity); - char* const rt = (char*)malloc(FUZZ_dataProducer_remainingBytes(producer)); - - FUZZ_ASSERT(dst); - FUZZ_ASSERT(rt); - - /* Compression must succeed and round trip correctly. */ - size_t const dstSize = - LZ4F_compressFrame(dst, dstCapacity, data, size, &prefs); - FUZZ_ASSERT(!LZ4F_isError(dstSize)); - size_t const rtSize = FUZZ_decompressFrame(rt, size, dst, dstSize); - FUZZ_ASSERT_MSG(rtSize == size, "Incorrect regenerated size"); - FUZZ_ASSERT_MSG(!memcmp(data, rt, size), "Corruption!"); - - free(dst); - free(rt); - FUZZ_dataProducer_free(producer); - - return 0; -} diff --git a/librocksdb-sys/lz4/ossfuzz/round_trip_frame_uncompressed_fuzzer.c b/librocksdb-sys/lz4/ossfuzz/round_trip_frame_uncompressed_fuzzer.c deleted file mode 100644 index 76a99d2..0000000 --- a/librocksdb-sys/lz4/ossfuzz/round_trip_frame_uncompressed_fuzzer.c +++ /dev/null @@ -1,134 +0,0 @@ -/** - * This fuzz target performs a lz4 round-trip test (compress & decompress), - * compares the result with the original, and calls abort() on corruption. 
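
For readers less familiar with the frame API exercised by the round-trip fuzzer above, a plainer sketch of the same idea without the fuzzing plumbing, using default preferences (NULL) instead of fuzz-derived ones:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "lz4frame.h"

int main(void)
{
    const char src[] = "frame round trip example, frame round trip example";
    size_t const srcSize = strlen(src);

    size_t const bound = LZ4F_compressFrameBound(srcSize, NULL);  /* NULL = defaults */
    char* const frame = (char*)malloc(bound);
    char* const out   = (char*)malloc(srcSize);
    if (frame == NULL || out == NULL) return 1;

    size_t const frameSize = LZ4F_compressFrame(frame, bound, src, srcSize, NULL);
    if (LZ4F_isError(frameSize)) return 1;

    LZ4F_dctx* dctx = NULL;
    if (LZ4F_isError(LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION))) return 1;

    size_t dstSize  = srcSize;    /* in: capacity, out: bytes written */
    size_t consumed = frameSize;  /* in: bytes available, out: bytes read */
    size_t const rc = LZ4F_decompress(dctx, out, &dstSize, frame, &consumed, NULL);

    printf("ok = %d\n", rc == 0 && dstSize == srcSize
                        && memcmp(src, out, srcSize) == 0);

    LZ4F_freeDecompressionContext(dctx);
    free(frame);
    free(out);
    return 0;
}
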
- */ - -#include -#include -#include -#include - -#include "fuzz_data_producer.h" -#include "fuzz_helpers.h" -#include "lz4.h" -#include "lz4_helpers.h" -#include "lz4frame.h" -#include "lz4frame_static.h" - -static void decompress(LZ4F_dctx *dctx, void *src, void *dst, - size_t dstCapacity, size_t readSize) { - size_t ret = 1; - const void *srcPtr = (const char *) src; - void *dstPtr = (char *) dst; - const void *const srcEnd = (const char *) srcPtr + readSize; - - while (ret != 0) { - while (srcPtr < srcEnd && ret != 0) { - /* Any data within dst has been flushed at this stage */ - size_t dstSize = dstCapacity; - size_t srcSize = (const char *) srcEnd - (const char *) srcPtr; - ret = LZ4F_decompress(dctx, dstPtr, &dstSize, srcPtr, &srcSize, - /* LZ4F_decompressOptions_t */ NULL); - FUZZ_ASSERT(!LZ4F_isError(ret)); - - /* Update input */ - srcPtr = (const char *) srcPtr + srcSize; - dstPtr = (char *) dstPtr + dstSize; - } - - FUZZ_ASSERT(srcPtr <= srcEnd); - } -} - -static void compress_round_trip(const uint8_t *data, size_t size, - FUZZ_dataProducer_t *producer, LZ4F_preferences_t const prefs) { - - // Choose random uncompressed offset start and end by producing seeds from random data, calculate the remaining - // data size that will be used for compression later and use the seeds to actually calculate the offsets - size_t const uncompressedOffsetSeed = FUZZ_dataProducer_retrieve32(producer); - size_t const uncompressedEndOffsetSeed = FUZZ_dataProducer_retrieve32(producer); - size = FUZZ_dataProducer_remainingBytes(producer); - - size_t const uncompressedOffset = FUZZ_getRange_from_uint32(uncompressedOffsetSeed, 0, size); - size_t const uncompressedEndOffset = FUZZ_getRange_from_uint32(uncompressedEndOffsetSeed, uncompressedOffset, size); - size_t const uncompressedSize = uncompressedEndOffset - uncompressedOffset; - FUZZ_ASSERT(uncompressedOffset <= uncompressedEndOffset); - FUZZ_ASSERT(uncompressedEndOffset <= size); - - const uint8_t *const uncompressedData = data + uncompressedOffset; - - size_t const dstCapacity = - LZ4F_compressFrameBound(LZ4_compressBound(size), &prefs) + - uncompressedSize; - char *const dst = (char *) malloc(dstCapacity); - size_t rtCapacity = dstCapacity; - char *const rt = (char *) malloc(rtCapacity); - - FUZZ_ASSERT(dst); - FUZZ_ASSERT(rt); - - /* Compression must succeed and round trip correctly. 
*/ - LZ4F_compressionContext_t ctx; - size_t const ctxCreation = LZ4F_createCompressionContext(&ctx, LZ4F_VERSION); - FUZZ_ASSERT(!LZ4F_isError(ctxCreation)); - - size_t const headerSize = LZ4F_compressBegin(ctx, dst, dstCapacity, &prefs); - FUZZ_ASSERT(!LZ4F_isError(headerSize)); - size_t compressedSize = headerSize; - - /* Compress data before uncompressed offset */ - size_t lz4Return = LZ4F_compressUpdate(ctx, dst + compressedSize, dstCapacity, - data, uncompressedOffset, NULL); - FUZZ_ASSERT(!LZ4F_isError(lz4Return)); - compressedSize += lz4Return; - - /* Add uncompressed data */ - lz4Return = LZ4F_uncompressedUpdate(ctx, dst + compressedSize, dstCapacity, - uncompressedData, uncompressedSize, NULL); - FUZZ_ASSERT(!LZ4F_isError(lz4Return)); - compressedSize += lz4Return; - - /* Compress data after uncompressed offset */ - lz4Return = LZ4F_compressUpdate(ctx, dst + compressedSize, dstCapacity, - data + uncompressedEndOffset, - size - uncompressedEndOffset, NULL); - FUZZ_ASSERT(!LZ4F_isError(lz4Return)); - compressedSize += lz4Return; - - /* Finish compression */ - lz4Return = LZ4F_compressEnd(ctx, dst + compressedSize, dstCapacity, NULL); - FUZZ_ASSERT(!LZ4F_isError(lz4Return)); - compressedSize += lz4Return; - - LZ4F_decompressOptions_t opts; - memset(&opts, 0, sizeof(opts)); - opts.stableDst = 1; - LZ4F_dctx *dctx; - LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION); - FUZZ_ASSERT(dctx); - - decompress(dctx, dst, rt, rtCapacity, compressedSize); - - LZ4F_freeDecompressionContext(dctx); - - FUZZ_ASSERT_MSG(!memcmp(data, rt, size), "Corruption!"); - - free(dst); - free(rt); - - FUZZ_dataProducer_free(producer); - LZ4F_freeCompressionContext(ctx); -} - -static void compress_independent_block_mode(const uint8_t *data, size_t size) { - FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(data, size); - LZ4F_preferences_t prefs = FUZZ_dataProducer_preferences(producer); - prefs.frameInfo.blockMode = LZ4F_blockIndependent; - compress_round_trip(data, size, producer, prefs); -} - - -int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { - compress_independent_block_mode(data, size); - return 0; -} diff --git a/librocksdb-sys/lz4/ossfuzz/round_trip_fuzzer.c b/librocksdb-sys/lz4/ossfuzz/round_trip_fuzzer.c deleted file mode 100644 index 6236201..0000000 --- a/librocksdb-sys/lz4/ossfuzz/round_trip_fuzzer.c +++ /dev/null @@ -1,117 +0,0 @@ -/** - * This fuzz target performs a lz4 round-trip test (compress & decompress), - * compares the result with the original, and calls abort() on corruption. 
- */ - -#include -#include -#include -#include - -#include "fuzz_helpers.h" -#include "lz4.h" -#include "fuzz_data_producer.h" - -int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) -{ - FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(data, size); - size_t const partialCapacitySeed = FUZZ_dataProducer_retrieve32(producer); - size = FUZZ_dataProducer_remainingBytes(producer); - - size_t const partialCapacity = FUZZ_getRange_from_uint32(partialCapacitySeed, 0, size); - size_t const dstCapacity = LZ4_compressBound(size); - size_t const largeSize = 64 * 1024 - 1; - size_t const smallSize = 1024; - char* const dstPlusLargePrefix = (char*)malloc(dstCapacity + largeSize); - FUZZ_ASSERT(dstPlusLargePrefix); - char* const dstPlusSmallPrefix = dstPlusLargePrefix + largeSize - smallSize; - char* const largeDict = (char*)malloc(largeSize); - FUZZ_ASSERT(largeDict); - char* const smallDict = largeDict + largeSize - smallSize; - char* const dst = dstPlusLargePrefix + largeSize; - char* const rt = (char*)malloc(size); - FUZZ_ASSERT(rt); - - /* Compression must succeed and round trip correctly. */ - int const dstSize = LZ4_compress_default((const char*)data, dst, - size, dstCapacity); - FUZZ_ASSERT(dstSize > 0); - - int const rtSize = LZ4_decompress_safe(dst, rt, dstSize, size); - FUZZ_ASSERT_MSG(rtSize == size, "Incorrect size"); - FUZZ_ASSERT_MSG(!memcmp(data, rt, size), "Corruption!"); - - /* Partial decompression must succeed. */ - { - char* const partial = (char*)malloc(partialCapacity); - FUZZ_ASSERT(partial); - int const partialSize = LZ4_decompress_safe_partial( - dst, partial, dstSize, partialCapacity, partialCapacity); - FUZZ_ASSERT(partialSize >= 0); - FUZZ_ASSERT_MSG(partialSize == partialCapacity, "Incorrect size"); - FUZZ_ASSERT_MSG(!memcmp(data, partial, partialSize), "Corruption!"); - free(partial); - } - /* Partial decompression using dict with no dict. 
*/ - { - char* const partial = (char*)malloc(partialCapacity); - FUZZ_ASSERT(partial); - int const partialSize = LZ4_decompress_safe_partial_usingDict( - dst, partial, dstSize, partialCapacity, partialCapacity, NULL, 0); - FUZZ_ASSERT(partialSize >= 0); - FUZZ_ASSERT_MSG(partialSize == partialCapacity, "Incorrect size"); - FUZZ_ASSERT_MSG(!memcmp(data, partial, partialSize), "Corruption!"); - free(partial); - } - /* Partial decompression using dict with small prefix as dict */ - { - char* const partial = (char*)malloc(partialCapacity); - FUZZ_ASSERT(partial); - int const partialSize = LZ4_decompress_safe_partial_usingDict( - dst, partial, dstSize, partialCapacity, partialCapacity, dstPlusSmallPrefix, smallSize); - FUZZ_ASSERT(partialSize >= 0); - FUZZ_ASSERT_MSG(partialSize == partialCapacity, "Incorrect size"); - FUZZ_ASSERT_MSG(!memcmp(data, partial, partialSize), "Corruption!"); - free(partial); - } - /* Partial decompression using dict with large prefix as dict */ - { - char* const partial = (char*)malloc(partialCapacity); - FUZZ_ASSERT(partial); - int const partialSize = LZ4_decompress_safe_partial_usingDict( - dst, partial, dstSize, partialCapacity, partialCapacity, dstPlusLargePrefix, largeSize); - FUZZ_ASSERT(partialSize >= 0); - FUZZ_ASSERT_MSG(partialSize == partialCapacity, "Incorrect size"); - FUZZ_ASSERT_MSG(!memcmp(data, partial, partialSize), "Corruption!"); - free(partial); - } - /* Partial decompression using dict with small external dict */ - { - char* const partial = (char*)malloc(partialCapacity); - FUZZ_ASSERT(partial); - int const partialSize = LZ4_decompress_safe_partial_usingDict( - dst, partial, dstSize, partialCapacity, partialCapacity, smallDict, smallSize); - FUZZ_ASSERT(partialSize >= 0); - FUZZ_ASSERT_MSG(partialSize == partialCapacity, "Incorrect size"); - FUZZ_ASSERT_MSG(!memcmp(data, partial, partialSize), "Corruption!"); - free(partial); - } - /* Partial decompression using dict with large external dict */ - { - char* const partial = (char*)malloc(partialCapacity); - FUZZ_ASSERT(partial); - int const partialSize = LZ4_decompress_safe_partial_usingDict( - dst, partial, dstSize, partialCapacity, partialCapacity, largeDict, largeSize); - FUZZ_ASSERT(partialSize >= 0); - FUZZ_ASSERT_MSG(partialSize == partialCapacity, "Incorrect size"); - FUZZ_ASSERT_MSG(!memcmp(data, partial, partialSize), "Corruption!"); - free(partial); - } - - free(dstPlusLargePrefix); - free(largeDict); - free(rt); - FUZZ_dataProducer_free(producer); - - return 0; -} diff --git a/librocksdb-sys/lz4/ossfuzz/round_trip_hc_fuzzer.c b/librocksdb-sys/lz4/ossfuzz/round_trip_hc_fuzzer.c deleted file mode 100644 index 7d03ee2..0000000 --- a/librocksdb-sys/lz4/ossfuzz/round_trip_hc_fuzzer.c +++ /dev/null @@ -1,44 +0,0 @@ -/** - * This fuzz target performs a lz4 round-trip test (compress & decompress), - * compares the result with the original, and calls abort() on corruption. 
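
As a plainer illustration of the partial-decompression calls exercised above, a hedged sketch of LZ4_decompress_safe_partial(), which stops once the requested number of output bytes has been produced:

#include <stdio.h>
#include <string.h>
#include "lz4.h"

int main(void)
{
    const char src[] = "0123456789 0123456789 0123456789 0123456789";
    char compressed[LZ4_COMPRESSBOUND(sizeof(src))];
    char partial[10];

    int const cSize = LZ4_compress_default(src, compressed, (int)sizeof(src),
                                           (int)sizeof(compressed));
    if (cSize <= 0) return 1;

    /* Ask for only the first 10 decompressed bytes; the call may return
     * up to that many and never writes past the destination capacity. */
    int const got = LZ4_decompress_safe_partial(compressed, partial, cSize,
                                                (int)sizeof(partial),
                                                (int)sizeof(partial));
    printf("got %d bytes, prefix ok = %d\n", got,
           got >= 0 && memcmp(src, partial, (size_t)got) == 0);
    return 0;
}
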
- */ - -#include -#include -#include -#include - -#include "fuzz_helpers.h" -#include "fuzz_data_producer.h" -#include "lz4.h" -#include "lz4hc.h" - -int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) -{ - FUZZ_dataProducer_t *producer = FUZZ_dataProducer_create(data, size); - int const level = FUZZ_dataProducer_range32(producer, - LZ4HC_CLEVEL_MIN, LZ4HC_CLEVEL_MAX); - size = FUZZ_dataProducer_remainingBytes(producer); - - size_t const dstCapacity = LZ4_compressBound(size); - char* const dst = (char*)malloc(dstCapacity); - char* const rt = (char*)malloc(size); - - FUZZ_ASSERT(dst); - FUZZ_ASSERT(rt); - - /* Compression must succeed and round trip correctly. */ - int const dstSize = LZ4_compress_HC((const char*)data, dst, size, - dstCapacity, level); - FUZZ_ASSERT(dstSize > 0); - - int const rtSize = LZ4_decompress_safe(dst, rt, dstSize, size); - FUZZ_ASSERT_MSG(rtSize == size, "Incorrect size"); - FUZZ_ASSERT_MSG(!memcmp(data, rt, size), "Corruption!"); - - free(dst); - free(rt); - FUZZ_dataProducer_free(producer); - - return 0; -} diff --git a/librocksdb-sys/lz4/ossfuzz/round_trip_stream_fuzzer.c b/librocksdb-sys/lz4/ossfuzz/round_trip_stream_fuzzer.c deleted file mode 100644 index abfcd2d..0000000 --- a/librocksdb-sys/lz4/ossfuzz/round_trip_stream_fuzzer.c +++ /dev/null @@ -1,302 +0,0 @@ -/** - * This fuzz target performs a lz4 streaming round-trip test - * (compress & decompress), compares the result with the original, and calls - * abort() on corruption. - */ - -#include -#include -#include -#include - -#include "fuzz_helpers.h" -#define LZ4_STATIC_LINKING_ONLY -#include "lz4.h" -#define LZ4_HC_STATIC_LINKING_ONLY -#include "lz4hc.h" - -typedef struct { - char const* buf; - size_t size; - size_t pos; -} const_cursor_t; - -typedef struct { - char* buf; - size_t size; - size_t pos; -} cursor_t; - -typedef struct { - LZ4_stream_t* cstream; - LZ4_streamHC_t* cstreamHC; - LZ4_streamDecode_t* dstream; - const_cursor_t data; - cursor_t compressed; - cursor_t roundTrip; - uint32_t seed; - int level; -} state_t; - -cursor_t cursor_create(size_t size) -{ - cursor_t cursor; - cursor.buf = (char*)malloc(size); - cursor.size = size; - cursor.pos = 0; - FUZZ_ASSERT(cursor.buf); - return cursor; -} - -typedef void (*round_trip_t)(state_t* state); - -void cursor_free(cursor_t cursor) -{ - free(cursor.buf); -} - -state_t state_create(char const* data, size_t size, uint32_t seed) -{ - state_t state; - - state.seed = seed; - - state.data.buf = (char const*)data; - state.data.size = size; - state.data.pos = 0; - - /* Extra margin because we are streaming. 
*/ - state.compressed = cursor_create(1024 + 2 * LZ4_compressBound(size)); - state.roundTrip = cursor_create(size); - - state.cstream = LZ4_createStream(); - FUZZ_ASSERT(state.cstream); - state.cstreamHC = LZ4_createStreamHC(); - FUZZ_ASSERT(state.cstream); - state.dstream = LZ4_createStreamDecode(); - FUZZ_ASSERT(state.dstream); - - return state; -} - -void state_free(state_t state) -{ - cursor_free(state.compressed); - cursor_free(state.roundTrip); - LZ4_freeStream(state.cstream); - LZ4_freeStreamHC(state.cstreamHC); - LZ4_freeStreamDecode(state.dstream); -} - -static void state_reset(state_t* state, uint32_t seed) -{ - state->level = FUZZ_rand32(&seed, LZ4HC_CLEVEL_MIN, LZ4HC_CLEVEL_MAX); - LZ4_resetStream_fast(state->cstream); - LZ4_resetStreamHC_fast(state->cstreamHC, state->level); - LZ4_setStreamDecode(state->dstream, NULL, 0); - state->data.pos = 0; - state->compressed.pos = 0; - state->roundTrip.pos = 0; - state->seed = seed; -} - -static void state_decompress(state_t* state, char const* src, int srcSize) -{ - char* dst = state->roundTrip.buf + state->roundTrip.pos; - int const dstCapacity = state->roundTrip.size - state->roundTrip.pos; - int const dSize = LZ4_decompress_safe_continue(state->dstream, src, dst, - srcSize, dstCapacity); - FUZZ_ASSERT(dSize >= 0); - state->roundTrip.pos += dSize; -} - -static void state_checkRoundTrip(state_t const* state) -{ - char const* data = state->data.buf; - size_t const size = state->data.size; - FUZZ_ASSERT_MSG(size == state->roundTrip.pos, "Incorrect size!"); - FUZZ_ASSERT_MSG(!memcmp(data, state->roundTrip.buf, size), "Corruption!"); -} - -/** - * Picks a dictionary size and trims the dictionary off of the data. - * We copy the dictionary to the roundTrip so our validation passes. - */ -static size_t state_trimDict(state_t* state) -{ - /* 64 KB is the max dict size, allow slightly beyond that to test trim. */ - uint32_t maxDictSize = MIN(70 * 1024, state->data.size); - size_t const dictSize = FUZZ_rand32(&state->seed, 0, maxDictSize); - DEBUGLOG(2, "dictSize = %zu", dictSize); - FUZZ_ASSERT(state->data.pos == 0); - FUZZ_ASSERT(state->roundTrip.pos == 0); - memcpy(state->roundTrip.buf, state->data.buf, dictSize); - state->data.pos += dictSize; - state->roundTrip.pos += dictSize; - return dictSize; -} - -static void state_prefixRoundTrip(state_t* state) -{ - while (state->data.pos != state->data.size) { - char const* src = state->data.buf + state->data.pos; - char* dst = state->compressed.buf + state->compressed.pos; - int const srcRemaining = state->data.size - state->data.pos; - int const srcSize = FUZZ_rand32(&state->seed, 0, srcRemaining); - int const dstCapacity = state->compressed.size - state->compressed.pos; - int const cSize = LZ4_compress_fast_continue(state->cstream, src, dst, - srcSize, dstCapacity, 0); - FUZZ_ASSERT(cSize > 0); - state->data.pos += srcSize; - state->compressed.pos += cSize; - state_decompress(state, dst, cSize); - } -} - -static void state_extDictRoundTrip(state_t* state) -{ - int i = 0; - cursor_t data2 = cursor_create(state->data.size); - memcpy(data2.buf, state->data.buf, state->data.size); - while (state->data.pos != state->data.size) { - char const* data = (i++ & 1) ? 
state->data.buf : data2.buf; - char const* src = data + state->data.pos; - char* dst = state->compressed.buf + state->compressed.pos; - int const srcRemaining = state->data.size - state->data.pos; - int const srcSize = FUZZ_rand32(&state->seed, 0, srcRemaining); - int const dstCapacity = state->compressed.size - state->compressed.pos; - int const cSize = LZ4_compress_fast_continue(state->cstream, src, dst, - srcSize, dstCapacity, 0); - FUZZ_ASSERT(cSize > 0); - state->data.pos += srcSize; - state->compressed.pos += cSize; - state_decompress(state, dst, cSize); - } - cursor_free(data2); -} - -static void state_randomRoundTrip(state_t* state, round_trip_t rt0, - round_trip_t rt1) -{ - if (FUZZ_rand32(&state->seed, 0, 1)) { - rt0(state); - } else { - rt1(state); - } -} - -static void state_loadDictRoundTrip(state_t* state) -{ - char const* dict = state->data.buf; - size_t const dictSize = state_trimDict(state); - LZ4_loadDict(state->cstream, dict, dictSize); - LZ4_setStreamDecode(state->dstream, dict, dictSize); - state_randomRoundTrip(state, state_prefixRoundTrip, state_extDictRoundTrip); -} - -static void state_attachDictRoundTrip(state_t* state) -{ - char const* dict = state->data.buf; - size_t const dictSize = state_trimDict(state); - LZ4_stream_t* dictStream = LZ4_createStream(); - LZ4_loadDict(dictStream, dict, dictSize); - LZ4_attach_dictionary(state->cstream, dictStream); - LZ4_setStreamDecode(state->dstream, dict, dictSize); - state_randomRoundTrip(state, state_prefixRoundTrip, state_extDictRoundTrip); - LZ4_freeStream(dictStream); -} - -static void state_prefixHCRoundTrip(state_t* state) -{ - while (state->data.pos != state->data.size) { - char const* src = state->data.buf + state->data.pos; - char* dst = state->compressed.buf + state->compressed.pos; - int const srcRemaining = state->data.size - state->data.pos; - int const srcSize = FUZZ_rand32(&state->seed, 0, srcRemaining); - int const dstCapacity = state->compressed.size - state->compressed.pos; - int const cSize = LZ4_compress_HC_continue(state->cstreamHC, src, dst, - srcSize, dstCapacity); - FUZZ_ASSERT(cSize > 0); - state->data.pos += srcSize; - state->compressed.pos += cSize; - state_decompress(state, dst, cSize); - } -} - -static void state_extDictHCRoundTrip(state_t* state) -{ - int i = 0; - cursor_t data2 = cursor_create(state->data.size); - DEBUGLOG(2, "extDictHC"); - memcpy(data2.buf, state->data.buf, state->data.size); - while (state->data.pos != state->data.size) { - char const* data = (i++ & 1) ? 
state->data.buf : data2.buf; - char const* src = data + state->data.pos; - char* dst = state->compressed.buf + state->compressed.pos; - int const srcRemaining = state->data.size - state->data.pos; - int const srcSize = FUZZ_rand32(&state->seed, 0, srcRemaining); - int const dstCapacity = state->compressed.size - state->compressed.pos; - int const cSize = LZ4_compress_HC_continue(state->cstreamHC, src, dst, - srcSize, dstCapacity); - FUZZ_ASSERT(cSize > 0); - DEBUGLOG(2, "srcSize = %d", srcSize); - state->data.pos += srcSize; - state->compressed.pos += cSize; - state_decompress(state, dst, cSize); - } - cursor_free(data2); -} - -static void state_loadDictHCRoundTrip(state_t* state) -{ - char const* dict = state->data.buf; - size_t const dictSize = state_trimDict(state); - LZ4_loadDictHC(state->cstreamHC, dict, dictSize); - LZ4_setStreamDecode(state->dstream, dict, dictSize); - state_randomRoundTrip(state, state_prefixHCRoundTrip, - state_extDictHCRoundTrip); -} - -static void state_attachDictHCRoundTrip(state_t* state) -{ - char const* dict = state->data.buf; - size_t const dictSize = state_trimDict(state); - LZ4_streamHC_t* dictStream = LZ4_createStreamHC(); - LZ4_setCompressionLevel(dictStream, state->level); - LZ4_loadDictHC(dictStream, dict, dictSize); - LZ4_attach_HC_dictionary(state->cstreamHC, dictStream); - LZ4_setStreamDecode(state->dstream, dict, dictSize); - state_randomRoundTrip(state, state_prefixHCRoundTrip, - state_extDictHCRoundTrip); - LZ4_freeStreamHC(dictStream); -} - -round_trip_t roundTrips[] = { - &state_prefixRoundTrip, - &state_extDictRoundTrip, - &state_loadDictRoundTrip, - &state_attachDictRoundTrip, - &state_prefixHCRoundTrip, - &state_extDictHCRoundTrip, - &state_loadDictHCRoundTrip, - &state_attachDictHCRoundTrip, -}; - -int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) -{ - uint32_t seed = FUZZ_seed(&data, &size); - state_t state = state_create((char const*)data, size, seed); - const int n = sizeof(roundTrips) / sizeof(round_trip_t); - int i; - - for (i = 0; i < n; ++i) { - DEBUGLOG(2, "Round trip %d", i); - state_reset(&state, seed); - roundTrips[i](&state); - state_checkRoundTrip(&state); - } - - state_free(state); - - return 0; -} diff --git a/librocksdb-sys/lz4/ossfuzz/standaloneengine.c b/librocksdb-sys/lz4/ossfuzz/standaloneengine.c deleted file mode 100644 index 6afeffd..0000000 --- a/librocksdb-sys/lz4/ossfuzz/standaloneengine.c +++ /dev/null @@ -1,74 +0,0 @@ -#include -#include -#include - -#include "fuzz.h" - -/** - * Main procedure for standalone fuzzing engine. - * - * Reads filenames from the argument array. For each filename, read the file - * into memory and then call the fuzzing interface with the data. - */ -int main(int argc, char **argv) -{ - int ii; - for(ii = 1; ii < argc; ii++) - { - FILE *infile; - printf("[%s] ", argv[ii]); - - /* Try and open the file. */ - infile = fopen(argv[ii], "rb"); - if(infile) - { - uint8_t *buffer = NULL; - size_t buffer_len; - - printf("Opened.. "); - - /* Get the length of the file. */ - fseek(infile, 0L, SEEK_END); - buffer_len = ftell(infile); - - /* Reset the file indicator to the beginning of the file. */ - fseek(infile, 0L, SEEK_SET); - - /* Allocate a buffer for the file contents. */ - buffer = (uint8_t *)calloc(buffer_len, sizeof(uint8_t)); - if(buffer) - { - /* Read all the text from the file into the buffer. */ - fread(buffer, sizeof(uint8_t), buffer_len, infile); - printf("Read %zu bytes, fuzzing.. ", buffer_len); - - /* Call the fuzzer with the data. 
*/ - LLVMFuzzerTestOneInput(buffer, buffer_len); - - printf("complete !!"); - - /* Free the buffer as it's no longer needed. */ - free(buffer); - buffer = NULL; - } - else - { - fprintf(stderr, - "[%s] Failed to allocate %zu bytes \n", - argv[ii], - buffer_len); - } - - /* Close the file as it's no longer needed. */ - fclose(infile); - infile = NULL; - } - else - { - /* Failed to open the file. Maybe wrong name or wrong permissions? */ - fprintf(stderr, "[%s] Open failed. \n", argv[ii]); - } - - printf("\n"); - } -} diff --git a/librocksdb-sys/lz4/ossfuzz/travisoss.sh b/librocksdb-sys/lz4/ossfuzz/travisoss.sh deleted file mode 100755 index eae9a80..0000000 --- a/librocksdb-sys/lz4/ossfuzz/travisoss.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -set -ex - -# Clone the oss-fuzz repository -git clone https://github.com/google/oss-fuzz.git /tmp/ossfuzz - -if [[ ! -d /tmp/ossfuzz/projects/lz4 ]] -then - echo "Could not find the lz4 project in ossfuzz" - exit 1 -fi - -# Modify the oss-fuzz Dockerfile so that we're checking out the current branch on travis. -if [ "x${TRAVIS_PULL_REQUEST}" = "xfalse" ] -then - sed -i "s@https://github.com/lz4/lz4.git@-b ${TRAVIS_BRANCH} https://github.com/lz4/lz4.git@" /tmp/ossfuzz/projects/lz4/Dockerfile -else - sed -i "s@https://github.com/lz4/lz4.git@-b ${TRAVIS_PULL_REQUEST_BRANCH} https://github.com/${TRAVIS_PULL_REQUEST_SLUG}.git@" /tmp/ossfuzz/projects/lz4/Dockerfile -fi - -# Try and build the fuzzers -pushd /tmp/ossfuzz -python infra/helper.py build_image --pull lz4 -python infra/helper.py build_fuzzers lz4 -popd diff --git a/librocksdb-sys/lz4/programs/.gitignore b/librocksdb-sys/lz4/programs/.gitignore deleted file mode 100644 index 9ffadd9..0000000 --- a/librocksdb-sys/lz4/programs/.gitignore +++ /dev/null @@ -1,21 +0,0 @@ -# local binary (Makefile) -lz4 -unlz4 -lz4cat -lz4c -lz4c32 -lz4-wlib -datagen -frametest -frametest32 -fullbench -fullbench32 -fuzzer -fuzzer32 -*.exe - -# tests files -tmp* - -# artefacts -*.dSYM diff --git a/librocksdb-sys/lz4/programs/COPYING b/librocksdb-sys/lz4/programs/COPYING deleted file mode 100644 index d159169..0000000 --- a/librocksdb-sys/lz4/programs/COPYING +++ /dev/null @@ -1,339 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. 
- - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. 
You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) 
- -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. 
- -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. 
diff --git a/librocksdb-sys/lz4/programs/Makefile b/librocksdb-sys/lz4/programs/Makefile deleted file mode 100644 index ace0d03..0000000 --- a/librocksdb-sys/lz4/programs/Makefile +++ /dev/null @@ -1,198 +0,0 @@ -# ########################################################################## -# LZ4 programs - Makefile -# Copyright (C) Yann Collet 2011-2020 -# -# This Makefile is validated for Linux, macOS, *BSD, Hurd, Solaris, MSYS2 targets -# -# GPL v2 License -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# You can contact the author at : -# - LZ4 homepage : http://www.lz4.org -# - LZ4 source repository : https://github.com/lz4/lz4 -# ########################################################################## -# lz4 : Command Line Utility, supporting gzip-like arguments -# lz4c : CLU, supporting also legacy lz4demo arguments -# lz4c32: Same as lz4c, but forced to compile in 32-bits mode -# ########################################################################## -SED = sed - -# Version numbers -LZ4DIR := ../lib -LIBVER_SRC := $(LZ4DIR)/lz4.h -LIBVER_MAJOR_SCRIPT:=`$(SED) -n '/define LZ4_VERSION_MAJOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)` -LIBVER_MINOR_SCRIPT:=`$(SED) -n '/define LZ4_VERSION_MINOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)` -LIBVER_PATCH_SCRIPT:=`$(SED) -n '/define LZ4_VERSION_RELEASE/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)` -LIBVER_SCRIPT:= $(LIBVER_MAJOR_SCRIPT).$(LIBVER_MINOR_SCRIPT).$(LIBVER_PATCH_SCRIPT) -LIBVER_MAJOR := $(shell echo $(LIBVER_MAJOR_SCRIPT)) -LIBVER_MINOR := $(shell echo $(LIBVER_MINOR_SCRIPT)) -LIBVER_PATCH := $(shell echo $(LIBVER_PATCH_SCRIPT)) -LIBVER := $(shell echo $(LIBVER_SCRIPT)) - -LIBFILES = $(wildcard $(LZ4DIR)/*.c) -SRCFILES = $(sort $(LIBFILES) $(wildcard *.c)) -OBJFILES = $(SRCFILES:.c=.o) - -CPPFLAGS += -I$(LZ4DIR) -DXXH_NAMESPACE=LZ4_ -CFLAGS ?= -O3 -DEBUGFLAGS= -Wall -Wextra -Wundef -Wcast-qual -Wcast-align -Wshadow \ - -Wswitch-enum -Wdeclaration-after-statement -Wstrict-prototypes \ - -Wpointer-arith -Wstrict-aliasing=1 -CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS) - -include ../Makefile.inc - -OS_VERSION ?= $(UNAME) -r -ifeq ($(TARGET_OS)$(shell $(OS_VERSION)),SunOS5.10) -LDFLAGS += -lrt -endif - -FLAGS = $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) - -LZ4_VERSION=$(LIBVER) -MD2ROFF = ronn -MD2ROFF_FLAGS = --roff --warnings --manual="User Commands" --organization="lz4 $(LZ4_VERSION)" - - -default: lz4-release - -# silent mode by default; verbose can be triggered by V=1 or VERBOSE=1 -$(V)$(VERBOSE).SILENT: - -all: lz4 lz4c - -all32: CFLAGS+=-m32 -all32: all - -ifeq ($(WINBASED),yes) -lz4-exe.rc: lz4-exe.rc.in - @echo creating executable resource - $(SED) -e 's|@PROGNAME@|lz4|' \ - -e 's|@LIBVER_MAJOR@|$(LIBVER_MAJOR)|g' \ - -e 's|@LIBVER_MINOR@|$(LIBVER_MINOR)|g' \ - -e 's|@LIBVER_PATCH@|$(LIBVER_PATCH)|g' \ - -e 's|@EXT@|$(EXT)|g' \ - $< >$@ - 
-lz4-exe.o: lz4-exe.rc - $(WINDRES) -i lz4-exe.rc -o lz4-exe.o - -lz4: $(OBJFILES) lz4-exe.o - $(CC) $(FLAGS) $^ -o $@$(EXT) -else -lz4: $(OBJFILES) - $(CC) $(FLAGS) $(OBJFILES) -o $@$(EXT) $(LDLIBS) -endif - -.PHONY: lz4-release -lz4-release: DEBUGFLAGS= -lz4-release: lz4 - -lz4-wlib: LIBFILES = -lz4-wlib: SRCFILES+= $(LZ4DIR)/xxhash.c # benchmark unit needs XXH64() -lz4-wlib: LDFLAGS += -L $(LZ4DIR) -lz4-wlib: LDLIBS = -llz4 -lz4-wlib: liblz4 $(OBJFILES) - @echo WARNING: $@ must link to an extended variant of the dynamic library which also exposes unstable symbols - $(CC) $(FLAGS) $(OBJFILES) -o $@$(EXT) $(LDLIBS) - -.PHONY:liblz4 -liblz4: - CPPFLAGS="-DLZ4F_PUBLISH_STATIC_FUNCTIONS -DLZ4_PUBLISH_STATIC_FUNCTIONS" $(MAKE) -C $(LZ4DIR) liblz4 - -lz4c: lz4 - $(LN_SF) lz4$(EXT) lz4c$(EXT) - -lz4c32: CFLAGS += -m32 -lz4c32 : $(SRCFILES) - $(CC) $(FLAGS) $^ -o $@$(EXT) - -lz4.1: lz4.1.md $(LIBVER_SRC) - cat $< | $(MD2ROFF) $(MD2ROFF_FLAGS) | $(SED) -n '/^\.\\\".*/!p' > $@ - -man: lz4.1 - -clean-man: - $(RM) lz4.1 - -preview-man: clean-man man - man ./lz4.1 - -clean: -ifeq ($(WINBASED),yes) - $(RM) *.rc -endif - $(MAKE) -C $(LZ4DIR) $@ > $(VOID) - $(RM) core *.o *.test tmp* \ - lz4$(EXT) lz4c$(EXT) lz4c32$(EXT) lz4-wlib$(EXT) \ - unlz4$(EXT) lz4cat$(EXT) - @echo Cleaning completed - - -#----------------------------------------------------------------------------- -# make install is validated only for Linux, OSX, BSD, Hurd and Solaris targets -#----------------------------------------------------------------------------- -ifeq ($(POSIX_ENV),Yes) - -unlz4: lz4 - $(LN_SF) lz4$(EXT) unlz4$(EXT) - -lz4cat: lz4 - $(LN_SF) lz4$(EXT) lz4cat$(EXT) - -DESTDIR ?= -# directory variables : GNU conventions prefer lowercase -# see https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html -# support both lower and uppercase (BSD), use lowercase in script -PREFIX ?= /usr/local -prefix ?= $(PREFIX) -EXEC_PREFIX ?= $(prefix) -exec_prefix ?= $(EXEC_PREFIX) -BINDIR ?= $(exec_prefix)/bin -bindir ?= $(BINDIR) -DATAROOTDIR ?= $(prefix)/share -datarootdir ?= $(DATAROOTDIR) -MANDIR ?= $(datarootdir)/man -mandir ?= $(MANDIR) -MAN1DIR ?= $(mandir)/man1 -man1dir ?= $(MAN1DIR) - -install: lz4 - @echo Installing binaries in $(DESTDIR)$(bindir) - $(INSTALL_DIR) $(DESTDIR)$(bindir)/ $(DESTDIR)$(man1dir)/ - $(INSTALL_PROGRAM) lz4$(EXT) $(DESTDIR)$(bindir)/lz4$(EXT) - $(LN_SF) lz4$(EXT) $(DESTDIR)$(bindir)/lz4c$(EXT) - $(LN_SF) lz4$(EXT) $(DESTDIR)$(bindir)/lz4cat$(EXT) - $(LN_SF) lz4$(EXT) $(DESTDIR)$(bindir)/unlz4$(EXT) - @echo Installing man pages in $(DESTDIR)$(man1dir) - $(INSTALL_DATA) lz4.1 $(DESTDIR)$(man1dir)/lz4.1 - $(LN_SF) lz4.1 $(DESTDIR)$(man1dir)/lz4c.1 - $(LN_SF) lz4.1 $(DESTDIR)$(man1dir)/lz4cat.1 - $(LN_SF) lz4.1 $(DESTDIR)$(man1dir)/unlz4.1 - @echo lz4 installation completed - -uninstall: - $(RM) $(DESTDIR)$(bindir)/lz4cat$(EXT) - $(RM) $(DESTDIR)$(bindir)/unlz4$(EXT) - $(RM) $(DESTDIR)$(bindir)/lz4$(EXT) - $(RM) $(DESTDIR)$(bindir)/lz4c$(EXT) - $(RM) $(DESTDIR)$(man1dir)/lz4.1 - $(RM) $(DESTDIR)$(man1dir)/lz4c.1 - $(RM) $(DESTDIR)$(man1dir)/lz4cat.1 - $(RM) $(DESTDIR)$(man1dir)/unlz4.1 - @echo lz4 programs successfully uninstalled - -endif diff --git a/librocksdb-sys/lz4/programs/README.md b/librocksdb-sys/lz4/programs/README.md deleted file mode 100644 index c1995af..0000000 --- a/librocksdb-sys/lz4/programs/README.md +++ /dev/null @@ -1,84 +0,0 @@ -Command Line Interface for LZ4 library -============================================ - -### Build -The Command Line Interface (CLI) can be 
generated -using the `make` command without any additional parameters. - -The `Makefile` script supports all [standard conventions](https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html), -including standard targets (`all`, `install`, `clean`, etc.) -and standard variables (`CC`, `CFLAGS`, `CPPFLAGS`, etc.). - -For advanced use cases, there are targets to different variations of the CLI: -- `lz4` : default CLI, with a command line syntax close to gzip -- `lz4c` : Same as `lz4` with additional support legacy lz4 commands (incompatible with gzip) -- `lz4c32` : Same as `lz4c`, but forced to compile in 32-bits mode - -The CLI generates and decodes [LZ4-compressed frames](../doc/lz4_Frame_format.md). - - -#### Aggregation of parameters -CLI supports aggregation of parameters i.e. `-b1`, `-e18`, and `-i1` can be joined into `-b1e18i1`. - - -#### Benchmark in Command Line Interface -CLI includes in-memory compression benchmark module for lz4. -The benchmark is conducted using a given filename. -The file is read into memory. -It makes benchmark more precise as it eliminates I/O overhead. - -The benchmark measures ratio, compressed size, compression and decompression speed. -One can select compression levels starting from `-b` and ending with `-e`. -The `-i` parameter selects a number of seconds used for each of tested levels. - - - -#### Usage of Command Line Interface -The full list of commands can be obtained with `-h` or `-H` parameter: -``` -Usage : - lz4 [arg] [input] [output] - -input : a filename - with no FILE, or when FILE is - or stdin, read standard input -Arguments : - -1 : Fast compression (default) - -9 : High compression - -d : decompression (default for .lz4 extension) - -z : force compression - -D FILE: use FILE as dictionary - -f : overwrite output without prompting - -k : preserve source files(s) (default) ---rm : remove source file(s) after successful de/compression - -h/-H : display help/long help and exit - -Advanced arguments : - -V : display Version number and exit - -v : verbose mode - -q : suppress warnings; specify twice to suppress errors too - -c : force write to standard output, even if it is the console - -t : test compressed file integrity - -m : multiple input files (implies automatic output filenames) - -r : operate recursively on directories (sets also -m) - -l : compress using Legacy format (Linux kernel compression) - -B# : cut file into blocks of size # bytes [32+] - or predefined block size [4-7] (default: 7) - -BD : Block dependency (improve compression ratio) - -BX : enable block checksum (default:disabled) ---no-frame-crc : disable stream checksum (default:enabled) ---content-size : compressed frame includes original size (default:not present) ---[no-]sparse : sparse mode (default:enabled on file, disabled on stdout) ---favor-decSpeed: compressed files decompress faster, but are less compressed ---fast[=#]: switch to ultra fast compression level (default: 1) - -Benchmark arguments : - -b# : benchmark file(s), using # compression level (default : 1) - -e# : test all compression levels from -bX to # (default : 1) - -i# : minimum evaluation time in seconds (default : 3s)``` -``` - -#### License - -All files in this directory are licensed under GPL-v2. -See [COPYING](COPYING) for details. -The text of the license is also included at the top of each source file. 
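
For context on what the benchmark module described in the README above actually measures, here is a minimal, self-contained sketch of the same in-memory round trip (compress a buffer, decompress it, check it, and report the ratio) using only the stable public `lz4.h` API. This is an illustration, not the removed `bench.c` implementation: the payload string and buffer sizes are arbitrary assumptions, and the timing loops, block splitting, and dictionary handling of the real benchmark are deliberately omitted.

```c
/* Minimal in-memory LZ4 round trip: compress, decompress, verify, and
 * print size and ratio (compressed / original), which is what the CLI
 * benchmark reports. Uses only the stable lz4.h API. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "lz4.h"

int main(void)
{
    /* Arbitrary, somewhat repetitive example payload (assumption). */
    const char src[] = "an arbitrary, repetitive example payload payload payload";
    const int srcSize = (int)sizeof(src);

    const int bound = LZ4_compressBound(srcSize);      /* worst-case compressed size */
    char* const compressed = (char*)malloc((size_t)bound);
    char* const restored   = (char*)malloc((size_t)srcSize);
    if (!compressed || !restored) return 1;

    const int cSize = LZ4_compress_default(src, compressed, srcSize, bound);
    if (cSize <= 0) return 1;                          /* compression failed */

    const int dSize = LZ4_decompress_safe(compressed, restored, cSize, srcSize);
    if (dSize != srcSize || memcmp(src, restored, (size_t)srcSize) != 0)
        return 1;                                      /* round trip mismatch */

    printf("%d -> %d bytes (ratio %.3f)\n",
           srcSize, cSize, (double)cSize / srcSize);

    free(compressed);
    free(restored);
    return 0;
}
```

The deleted `bench.c` that follows in this diff wraps this same pattern in repeated timed loops per compression level and per block, which is where the speed figures come from.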
diff --git a/librocksdb-sys/lz4/programs/bench.c b/librocksdb-sys/lz4/programs/bench.c deleted file mode 100644 index 4d35ef9..0000000 --- a/librocksdb-sys/lz4/programs/bench.c +++ /dev/null @@ -1,837 +0,0 @@ -/* - bench.c - Demo program to benchmark open-source compression algorithms - Copyright (C) Yann Collet 2012-2020 - - GPL v2 License - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - You can contact the author at : - - LZ4 homepage : http://www.lz4.org - - LZ4 source repository : https://github.com/lz4/lz4 -*/ - - -/*-************************************ -* Compiler options -**************************************/ -#ifdef _MSC_VER /* Visual Studio */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -#endif - - -/* ************************************* -* Includes -***************************************/ -#include "platform.h" /* Compiler options */ -#include "util.h" /* UTIL_GetFileSize, UTIL_sleep */ -#include /* malloc, free */ -#include /* memset */ -#include /* fprintf, fopen, ftello */ -#include /* clock_t, clock, CLOCKS_PER_SEC */ -#include /* assert */ - -#include "datagen.h" /* RDG_genBuffer */ -#include "xxhash.h" -#include "bench.h" - -#define LZ4_STATIC_LINKING_ONLY -#include "lz4.h" -#define LZ4_HC_STATIC_LINKING_ONLY -#include "lz4hc.h" -#include "lz4frame.h" /* LZ4F_decompress */ - - -/* ************************************* -* Constants -***************************************/ -#ifndef LZ4_GIT_COMMIT_STRING -# define LZ4_GIT_COMMIT_STRING "" -#else -# define LZ4_GIT_COMMIT_STRING LZ4_EXPAND_AND_QUOTE(LZ4_GIT_COMMIT) -#endif - -#define NBSECONDS 3 -#define TIMELOOP_MICROSEC 1*1000000ULL /* 1 second */ -#define TIMELOOP_NANOSEC 1*1000000000ULL /* 1 second */ -#define ACTIVEPERIOD_MICROSEC 70*1000000ULL /* 70 seconds */ -#define COOLPERIOD_SEC 10 -#define DECOMP_MULT 1 /* test decompression DECOMP_MULT times longer than compression */ - -#define KB *(1 <<10) -#define MB *(1 <<20) -#define GB *(1U<<30) - -#define LZ4_MAX_DICT_SIZE (64 KB) - -static const size_t maxMemory = (sizeof(size_t)==4) ? (2 GB - 64 MB) : (size_t)(1ULL << ((sizeof(size_t)*8)-31)); - -static U32 g_compressibilityDefault = 50; - - -/* ************************************* -* console display -***************************************/ -#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) -#define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); } -static U32 g_displayLevel = 2; /* 0 : no display; 1: errors; 2 : + result + interaction + warnings; 3 : + progression; 4 : + information */ - -#define DISPLAYUPDATE(l, ...) 
if (g_displayLevel>=l) { \ - if ((clock() - g_time > refreshRate) || (g_displayLevel>=4)) \ - { g_time = clock(); DISPLAY(__VA_ARGS__); \ - if (g_displayLevel>=4) fflush(stdout); } } -static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; -static clock_t g_time = 0; - - -/* ************************************* -* DEBUG and error conditions -***************************************/ -#ifndef DEBUG -# define DEBUG 0 -#endif -#define DEBUGOUTPUT(...) if (DEBUG) DISPLAY(__VA_ARGS__); -#define END_PROCESS(error, ...) \ -{ \ - DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \ - DISPLAYLEVEL(1, "Error %i : ", error); \ - DISPLAYLEVEL(1, __VA_ARGS__); \ - DISPLAYLEVEL(1, "\n"); \ - exit(error); \ -} - -#define LZ4_isError(errcode) (errcode==0) - - -/* ************************************* -* Benchmark Parameters -***************************************/ -static U32 g_nbSeconds = NBSECONDS; -static size_t g_blockSize = 0; -int g_additionalParam = 0; -int g_benchSeparately = 0; -int g_decodeOnly = 0; -unsigned g_skipChecksums = 0; - -void BMK_setNotificationLevel(unsigned level) { g_displayLevel=level; } - -void BMK_setAdditionalParam(int additionalParam) { g_additionalParam=additionalParam; } - -void BMK_setNbSeconds(unsigned nbSeconds) -{ - g_nbSeconds = nbSeconds; - DISPLAYLEVEL(3, "- test >= %u seconds per compression / decompression -\n", g_nbSeconds); -} - -void BMK_setBlockSize(size_t blockSize) { g_blockSize = blockSize; } - -void BMK_setBenchSeparately(int separate) { g_benchSeparately = (separate!=0); } - -void BMK_setDecodeOnlyMode(int set) { g_decodeOnly = (set!=0); } - -void BMK_skipChecksums(int skip) { g_skipChecksums = (skip!=0); } - - -/* ************************************* - * Compression state management -***************************************/ - -struct compressionParameters -{ - int cLevel; - const char* dictBuf; - int dictSize; - - LZ4_stream_t* LZ4_stream; - LZ4_stream_t* LZ4_dictStream; - LZ4_streamHC_t* LZ4_streamHC; - LZ4_streamHC_t* LZ4_dictStreamHC; - - void (*initFunction)( - struct compressionParameters* pThis); - void (*resetFunction)( - const struct compressionParameters* pThis); - int (*blockFunction)( - const struct compressionParameters* pThis, - const char* src, char* dst, int srcSize, int dstSize); - void (*cleanupFunction)( - const struct compressionParameters* pThis); -}; - -static void -LZ4_compressInitNoStream(struct compressionParameters* pThis) -{ - pThis->LZ4_stream = NULL; - pThis->LZ4_dictStream = NULL; - pThis->LZ4_streamHC = NULL; - pThis->LZ4_dictStreamHC = NULL; -} - -static void -LZ4_compressInitStream(struct compressionParameters* pThis) -{ - pThis->LZ4_stream = LZ4_createStream(); - pThis->LZ4_dictStream = LZ4_createStream(); - pThis->LZ4_streamHC = NULL; - pThis->LZ4_dictStreamHC = NULL; - LZ4_loadDict(pThis->LZ4_dictStream, pThis->dictBuf, pThis->dictSize); -} - -static void -LZ4_compressInitStreamHC(struct compressionParameters* pThis) -{ - pThis->LZ4_stream = NULL; - pThis->LZ4_dictStream = NULL; - pThis->LZ4_streamHC = LZ4_createStreamHC(); - pThis->LZ4_dictStreamHC = LZ4_createStreamHC(); - LZ4_loadDictHC(pThis->LZ4_dictStreamHC, pThis->dictBuf, pThis->dictSize); -} - -static void -LZ4_compressResetNoStream(const struct compressionParameters* pThis) -{ - (void)pThis; -} - -static void -LZ4_compressResetStream(const struct compressionParameters* pThis) -{ - LZ4_resetStream_fast(pThis->LZ4_stream); - LZ4_attach_dictionary(pThis->LZ4_stream, pThis->LZ4_dictStream); -} - -static void 
-LZ4_compressResetStreamHC(const struct compressionParameters* pThis) -{ - LZ4_resetStreamHC_fast(pThis->LZ4_streamHC, pThis->cLevel); - LZ4_attach_HC_dictionary(pThis->LZ4_streamHC, pThis->LZ4_dictStreamHC); -} - -static int -LZ4_compressBlockNoStream(const struct compressionParameters* pThis, - const char* src, char* dst, - int srcSize, int dstSize) -{ - int const acceleration = (pThis->cLevel < 0) ? -pThis->cLevel + 1 : 1; - return LZ4_compress_fast(src, dst, srcSize, dstSize, acceleration); -} - -static int -LZ4_compressBlockNoStreamHC(const struct compressionParameters* pThis, - const char* src, char* dst, - int srcSize, int dstSize) -{ - return LZ4_compress_HC(src, dst, srcSize, dstSize, pThis->cLevel); -} - -static int -LZ4_compressBlockStream(const struct compressionParameters* pThis, - const char* src, char* dst, - int srcSize, int dstSize) -{ - int const acceleration = (pThis->cLevel < 0) ? -pThis->cLevel + 1 : 1; - return LZ4_compress_fast_continue(pThis->LZ4_stream, src, dst, srcSize, dstSize, acceleration); -} - -static int -LZ4_compressBlockStreamHC(const struct compressionParameters* pThis, - const char* src, char* dst, - int srcSize, int dstSize) -{ - return LZ4_compress_HC_continue(pThis->LZ4_streamHC, src, dst, srcSize, dstSize); -} - -static void -LZ4_compressCleanupNoStream(const struct compressionParameters* pThis) -{ - (void)pThis; -} - -static void -LZ4_compressCleanupStream(const struct compressionParameters* pThis) -{ - LZ4_freeStream(pThis->LZ4_stream); - LZ4_freeStream(pThis->LZ4_dictStream); -} - -static void -LZ4_compressCleanupStreamHC(const struct compressionParameters* pThis) -{ - LZ4_freeStreamHC(pThis->LZ4_streamHC); - LZ4_freeStreamHC(pThis->LZ4_dictStreamHC); -} - -static void -LZ4_buildCompressionParameters(struct compressionParameters* pParams, - int cLevel, - const char* dictBuf, int dictSize) -{ - pParams->cLevel = cLevel; - pParams->dictBuf = dictBuf; - pParams->dictSize = dictSize; - - if (dictSize) { - if (cLevel < LZ4HC_CLEVEL_MIN) { - pParams->initFunction = LZ4_compressInitStream; - pParams->resetFunction = LZ4_compressResetStream; - pParams->blockFunction = LZ4_compressBlockStream; - pParams->cleanupFunction = LZ4_compressCleanupStream; - } else { - pParams->initFunction = LZ4_compressInitStreamHC; - pParams->resetFunction = LZ4_compressResetStreamHC; - pParams->blockFunction = LZ4_compressBlockStreamHC; - pParams->cleanupFunction = LZ4_compressCleanupStreamHC; - } - } else { - pParams->initFunction = LZ4_compressInitNoStream; - pParams->resetFunction = LZ4_compressResetNoStream; - pParams->cleanupFunction = LZ4_compressCleanupNoStream; - - if (cLevel < LZ4HC_CLEVEL_MIN) { - pParams->blockFunction = LZ4_compressBlockNoStream; - } else { - pParams->blockFunction = LZ4_compressBlockNoStreamHC; - } - } -} - - -typedef int (*DecFunction_f)(const char* src, char* dst, - int srcSize, int dstCapacity, - const char* dictStart, int dictSize); - -static LZ4F_dctx* g_dctx = NULL; - -static int -LZ4F_decompress_binding(const char* src, char* dst, - int srcSize, int dstCapacity, - const char* dictStart, int dictSize) -{ - size_t dstSize = (size_t)dstCapacity; - size_t readSize = (size_t)srcSize; - LZ4F_decompressOptions_t dOpt = { 1, 0, 0, 0 }; - size_t decStatus; - dOpt.skipChecksums = g_skipChecksums; - decStatus = LZ4F_decompress(g_dctx, - dst, &dstSize, - src, &readSize, - &dOpt); - if ( (decStatus == 0) /* decompression successful */ - && ((int)readSize==srcSize) /* consume all input */ ) - return (int)dstSize; - /* else, error */ - return -1; - 
(void)dictStart; (void)dictSize; /* not compatible with dictionary yet */ -} - - -/* ******************************************************** -* Bench functions -**********************************************************/ -typedef struct { - const char* srcPtr; - size_t srcSize; - char* cPtr; - size_t cRoom; - size_t cSize; - char* resPtr; - size_t resSize; -} blockParam_t; - -#define MIN(a,b) ((a)<(b) ? (a) : (b)) -#define MAX(a,b) ((a)>(b) ? (a) : (b)) - -static int BMK_benchMem(const void* srcBuffer, size_t srcSize, - const char* displayName, int cLevel, - const size_t* fileSizes, U32 nbFiles, - const char* dictBuf, int dictSize) -{ - size_t const blockSize = (g_blockSize>=32 && !g_decodeOnly ? g_blockSize : srcSize) + (!srcSize) /* avoid div by 0 */ ; - U32 const maxNbBlocks = (U32)((srcSize + (blockSize-1)) / blockSize) + nbFiles; - blockParam_t* const blockTable = (blockParam_t*) malloc(maxNbBlocks * sizeof(blockParam_t)); - size_t const maxCompressedSize = (size_t)LZ4_compressBound((int)srcSize) + (maxNbBlocks * 1024); /* add some room for safety */ - void* const compressedBuffer = malloc(maxCompressedSize); - size_t const decMultiplier = g_decodeOnly ? 255 : 1; - size_t const maxInSize = (size_t)LZ4_MAX_INPUT_SIZE / decMultiplier; - size_t const maxDecSize = srcSize < maxInSize ? srcSize * decMultiplier : LZ4_MAX_INPUT_SIZE; - void* const resultBuffer = malloc(maxDecSize); - U32 nbBlocks; - struct compressionParameters compP; - - /* checks */ - if (!compressedBuffer || !resultBuffer || !blockTable) - END_PROCESS(31, "allocation error : not enough memory"); - - if (strlen(displayName)>17) displayName += strlen(displayName)-17; /* can only display 17 characters */ - - /* init */ - LZ4_buildCompressionParameters(&compP, cLevel, dictBuf, dictSize); - compP.initFunction(&compP); - if (g_dctx==NULL) { - LZ4F_createDecompressionContext(&g_dctx, LZ4F_VERSION); - if (g_dctx==NULL) - END_PROCESS(1, "allocation error - decompression state"); - } - - /* Init blockTable data */ - { const char* srcPtr = (const char*)srcBuffer; - char* cPtr = (char*)compressedBuffer; - char* resPtr = (char*)resultBuffer; - U32 fileNb; - for (nbBlocks=0, fileNb=0; fileNb ACTIVEPERIOD_MICROSEC) { - DISPLAYLEVEL(2, "\rcooling down ... 
\r"); - UTIL_sleep(COOLPERIOD_SEC); - coolTime = UTIL_getTime(); - } - - /* Compression */ - DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->\r", marks[markNb], displayName, (U32)totalRSize); - if (!cCompleted) memset(compressedBuffer, 0xE5, maxCompressedSize); /* warm up and erase compressed buffer */ - - UTIL_sleepMilli(1); /* give processor time to other processes */ - UTIL_waitForNextTick(); - - if (!cCompleted) { /* still some time to do compression tests */ - UTIL_time_t const clockStart = UTIL_getTime(); - U32 nbLoops; - for (nbLoops=0; nbLoops < nbCompressionLoops; nbLoops++) { - U32 blockNb; - compP.resetFunction(&compP); - for (blockNb=0; blockNb 0) { - if (clockSpan < fastestC * nbCompressionLoops) - fastestC = clockSpan / nbCompressionLoops; - assert(fastestC > 0); - nbCompressionLoops = (U32)(TIMELOOP_NANOSEC / fastestC) + 1; /* aim for ~1sec */ - } else { - assert(nbCompressionLoops < 40000000); /* avoid overflow */ - nbCompressionLoops *= 100; - } - totalCTime += clockSpan; - cCompleted = totalCTime>maxTime; - } - - cSize = 0; - { U32 blockNb; for (blockNb=0; blockNb%10u (%5.3f),%6.1f MB/s\r", - marks[markNb], displayName, - (U32)totalRSize, (U32)cSize, ratio, - ((double)totalRSize / fastestC) * 1000 ); - } - (void)fastestD; (void)crcOrig; /* unused when decompression disabled */ -#if 1 - /* Decompression */ - if (!dCompleted) memset(resultBuffer, 0xD6, srcSize); /* warm result buffer */ - - UTIL_sleepMilli(5); /* give processor time to other processes */ - UTIL_waitForNextTick(); - - if (!dCompleted) { - const DecFunction_f decFunction = g_decodeOnly ? - LZ4F_decompress_binding : LZ4_decompress_safe_usingDict; - const char* const decString = g_decodeOnly ? - "LZ4F_decompress" : "LZ4_decompress_safe_usingDict"; - UTIL_time_t const clockStart = UTIL_getTime(); - U32 nbLoops; - - for (nbLoops=0; nbLoops < nbDecodeLoops; nbLoops++) { - U32 blockNb; - for (blockNb=0; blockNb 0) { - if (clockSpan < fastestD * nbDecodeLoops) - fastestD = clockSpan / nbDecodeLoops; - assert(fastestD > 0); - nbDecodeLoops = (U32)(TIMELOOP_NANOSEC / fastestD) + 1; /* aim for ~1sec */ - } else { - assert(nbDecodeLoops < 40000000); /* avoid overflow */ - nbDecodeLoops *= 100; - } - totalDTime += clockSpan; - dCompleted = totalDTime > (DECOMP_MULT*maxTime); - } } - - if (g_decodeOnly) { - unsigned u; - totalRSize = 0; - for (u=0; u%10u (%5.3f),%6.1f MB/s ,%6.1f MB/s\r", - marks[markNb], displayName, - (U32)totalRSize, (U32)cSize, ratio, - ((double)totalRSize / fastestC) * 1000, - ((double)totalRSize / fastestD) * 1000); - - /* CRC Checking (not possible in decode-only mode)*/ - if (!g_decodeOnly) { - U64 const crcCheck = XXH64(resultBuffer, srcSize, 0); - if (crcOrig!=crcCheck) { - size_t u; - DISPLAY("\n!!! WARNING !!! 
%17s : Invalid Checksum : %x != %x \n", displayName, (unsigned)crcOrig, (unsigned)crcCheck); - for (u=0; u u) break; - bacc += blockTable[segNb].srcSize; - } - pos = (U32)(u - bacc); - bNb = pos / (128 KB); - DISPLAY("(block %u, sub %u, pos %u) \n", segNb, bNb, pos); - break; - } - if (u==srcSize-1) { /* should never happen */ - DISPLAY("no difference detected\n"); - } } - break; - } } /* CRC Checking */ -#endif - } /* for (testNb = 1; testNb <= (g_nbSeconds + !g_nbSeconds); testNb++) */ - - if (g_displayLevel == 1) { - double const cSpeed = ((double)srcSize / fastestC) * 1000; - double const dSpeed = ((double)srcSize / fastestD) * 1000; - if (g_additionalParam) - DISPLAY("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s (param=%d)\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName, g_additionalParam); - else - DISPLAY("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName); - } - DISPLAYLEVEL(2, "%2i#\n", cLevel); - } /* Bench */ - - /* clean up */ - compP.cleanupFunction(&compP); - free(blockTable); - free(compressedBuffer); - free(resultBuffer); - return 0; -} - - -static size_t BMK_findMaxMem(U64 requiredMem) -{ - size_t step = 64 MB; - BYTE* testmem=NULL; - - requiredMem = (((requiredMem >> 26) + 1) << 26); - requiredMem += 2*step; - if (requiredMem > maxMemory) requiredMem = maxMemory; - - while (!testmem) { - if (requiredMem > step) requiredMem -= step; - else requiredMem >>= 1; - testmem = (BYTE*) malloc ((size_t)requiredMem); - } - free (testmem); - - /* keep some space available */ - if (requiredMem > step) requiredMem -= step; - else requiredMem >>= 1; - - return (size_t)requiredMem; -} - - -static void BMK_benchCLevel(void* srcBuffer, size_t benchedSize, - const char* displayName, int cLevel, int cLevelLast, - const size_t* fileSizes, unsigned nbFiles, - const char* dictBuf, int dictSize) -{ - int l; - - const char* pch = strrchr(displayName, '\\'); /* Windows */ - if (!pch) pch = strrchr(displayName, '/'); /* Linux */ - if (pch) displayName = pch+1; - - SET_REALTIME_PRIORITY; - - if (g_displayLevel == 1 && !g_additionalParam) - DISPLAY("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n", LZ4_VERSION_STRING, LZ4_GIT_COMMIT_STRING, (U32)benchedSize, g_nbSeconds, (U32)(g_blockSize>>10)); - - if (cLevelLast < cLevel) cLevelLast = cLevel; - - for (l=cLevel; l <= cLevelLast; l++) { - BMK_benchMem(srcBuffer, benchedSize, - displayName, l, - fileSizes, nbFiles, - dictBuf, dictSize); - } -} - - -/*! BMK_loadFiles() : - Loads `buffer` with content of files listed within `fileNamesTable`. 
- At most, fills `buffer` entirely */ -static void BMK_loadFiles(void* buffer, size_t bufferSize, - size_t* fileSizes, - const char** fileNamesTable, unsigned nbFiles) -{ - size_t pos = 0, totalSize = 0; - unsigned n; - for (n=0; n bufferSize-pos) { /* buffer too small - stop after this file */ - fileSize = bufferSize-pos; - nbFiles=n; - } - { size_t const readSize = fread(((char*)buffer)+pos, 1, (size_t)fileSize, f); - if (readSize != (size_t)fileSize) END_PROCESS(11, "could not read %s", fileNamesTable[n]); - pos += readSize; } - fileSizes[n] = (size_t)fileSize; - totalSize += (size_t)fileSize; - fclose(f); - } - - if (totalSize == 0) END_PROCESS(12, "no data to bench"); -} - -static void BMK_benchFileTable(const char** fileNamesTable, unsigned nbFiles, - int cLevel, int cLevelLast, - const char* dictBuf, int dictSize) -{ - void* srcBuffer; - size_t benchedSize; - size_t* fileSizes = (size_t*)malloc(nbFiles * sizeof(size_t)); - U64 const totalSizeToLoad = UTIL_getTotalFileSize(fileNamesTable, nbFiles); - char mfName[20] = {0}; - - if (!fileSizes) END_PROCESS(12, "not enough memory for fileSizes"); - - /* Memory allocation & restrictions */ - benchedSize = BMK_findMaxMem(totalSizeToLoad * 3) / 3; - if (benchedSize==0) END_PROCESS(12, "not enough memory"); - if ((U64)benchedSize > totalSizeToLoad) benchedSize = (size_t)totalSizeToLoad; - if (benchedSize > LZ4_MAX_INPUT_SIZE) { - benchedSize = LZ4_MAX_INPUT_SIZE; - DISPLAY("File(s) bigger than LZ4's max input size; testing %u MB only...\n", (U32)(benchedSize >> 20)); - } else { - if (benchedSize < totalSizeToLoad) - DISPLAY("Not enough memory; testing %u MB only...\n", (U32)(benchedSize >> 20)); - } - srcBuffer = malloc(benchedSize + !benchedSize); /* avoid alloc of zero */ - if (!srcBuffer) END_PROCESS(12, "not enough memory"); - - /* Load input buffer */ - BMK_loadFiles(srcBuffer, benchedSize, fileSizes, fileNamesTable, nbFiles); - - /* Bench */ - snprintf (mfName, sizeof(mfName), " %u files", nbFiles); - { const char* displayName = (nbFiles > 1) ? 
mfName : fileNamesTable[0]; - BMK_benchCLevel(srcBuffer, benchedSize, - displayName, cLevel, cLevelLast, - fileSizes, nbFiles, - dictBuf, dictSize); - } - - /* clean up */ - free(srcBuffer); - free(fileSizes); -} - - -static void BMK_syntheticTest(int cLevel, int cLevelLast, double compressibility, - const char* dictBuf, int dictSize) -{ - char name[20] = {0}; - size_t benchedSize = 10000000; - void* const srcBuffer = malloc(benchedSize); - - /* Memory allocation */ - if (!srcBuffer) END_PROCESS(21, "not enough memory"); - - /* Fill input buffer */ - RDG_genBuffer(srcBuffer, benchedSize, compressibility, 0.0, 0); - - /* Bench */ - snprintf (name, sizeof(name), "Synthetic %2u%%", (unsigned)(compressibility*100)); - BMK_benchCLevel(srcBuffer, benchedSize, name, cLevel, cLevelLast, &benchedSize, 1, dictBuf, dictSize); - - /* clean up */ - free(srcBuffer); -} - - -static int -BMK_benchFilesSeparately(const char** fileNamesTable, unsigned nbFiles, - int cLevel, int cLevelLast, - const char* dictBuf, int dictSize) -{ - unsigned fileNb; - if (cLevel > LZ4HC_CLEVEL_MAX) cLevel = LZ4HC_CLEVEL_MAX; - if (cLevelLast > LZ4HC_CLEVEL_MAX) cLevelLast = LZ4HC_CLEVEL_MAX; - if (cLevelLast < cLevel) cLevelLast = cLevel; - - for (fileNb=0; fileNb LZ4HC_CLEVEL_MAX) cLevel = LZ4HC_CLEVEL_MAX; - if (g_decodeOnly) { - DISPLAYLEVEL(2, "Benchmark Decompression of LZ4 Frame "); - if (g_skipChecksums) { - DISPLAYLEVEL(2, "_without_ checksum even when present \n"); - } else { - DISPLAYLEVEL(2, "+ Checksum when present \n"); - } - cLevelLast = cLevel; - } - if (cLevelLast > LZ4HC_CLEVEL_MAX) cLevelLast = LZ4HC_CLEVEL_MAX; - if (cLevelLast < cLevel) cLevelLast = cLevel; - if (cLevelLast > cLevel) - DISPLAYLEVEL(2, "Benchmarking levels from %d to %d\n", cLevel, cLevelLast); - - if (dictFileName) { - FILE* dictFile = NULL; - U64 const dictFileSize = UTIL_getFileSize(dictFileName); - if (!dictFileSize) - END_PROCESS(25, "Dictionary error : could not stat dictionary file"); - if (g_decodeOnly) - END_PROCESS(26, "Error : LZ4 Frame decoder mode not compatible with dictionary yet"); - - dictFile = fopen(dictFileName, "rb"); - if (!dictFile) - END_PROCESS(25, "Dictionary error : could not open dictionary file"); - - if (dictFileSize > LZ4_MAX_DICT_SIZE) { - dictSize = LZ4_MAX_DICT_SIZE; - if (UTIL_fseek(dictFile, (long)(dictFileSize - dictSize), SEEK_SET)) - END_PROCESS(25, "Dictionary error : could not seek dictionary file"); - } else { - dictSize = (size_t)dictFileSize; - } - - dictBuf = (char*)malloc(dictSize); - if (!dictBuf) END_PROCESS(25, "Allocation error : not enough memory"); - - if (fread(dictBuf, 1, dictSize, dictFile) != dictSize) - END_PROCESS(25, "Dictionary error : could not read dictionary file"); - - fclose(dictFile); - } - - if (nbFiles == 0) - BMK_syntheticTest(cLevel, cLevelLast, compressibility, dictBuf, (int)dictSize); - else { - if (g_benchSeparately) - BMK_benchFilesSeparately(fileNamesTable, nbFiles, cLevel, cLevelLast, dictBuf, (int)dictSize); - else - BMK_benchFileTable(fileNamesTable, nbFiles, cLevel, cLevelLast, dictBuf, (int)dictSize); - } - - free(dictBuf); - return 0; -} diff --git a/librocksdb-sys/lz4/programs/bench.h b/librocksdb-sys/lz4/programs/bench.h deleted file mode 100644 index 1d81a99..0000000 --- a/librocksdb-sys/lz4/programs/bench.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - bench.h - Demo program to benchmark open-source compression algorithm - Copyright (C) Yann Collet 2012-2020 - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU 
General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ -#ifndef BENCH_H_125623623633 -#define BENCH_H_125623623633 - -#include - -/* BMK_benchFiles() : - * Benchmark all files provided through array @fileNamesTable. - * All files must be valid, otherwise benchmark fails. - * Roundtrip measurements are done for each file individually, but - * unless BMK_setBenchSeparately() is set, all results are agglomerated. - * The method benchmarks all compression levels from @cLevelStart to @cLevelLast, - * both inclusive, providing one result per compression level. - * If @cLevelLast <= @cLevelStart, BMK_benchFiles() benchmarks @cLevelStart only. - * @dictFileName is optional, it's possible to provide NULL. - * When provided, compression and decompression use the specified file as dictionary. - * Only one dictionary can be provided, in which case it's applied to all benchmarked files. -**/ -int BMK_benchFiles(const char** fileNamesTable, unsigned nbFiles, - int cLevelStart, int cLevelLast, - const char* dictFileName); - -/* Set Parameters */ -void BMK_setNbSeconds(unsigned nbSeconds); /* minimum benchmark duration, in seconds, for both compression and decompression */ -void BMK_setBlockSize(size_t blockSize); /* Internally cut input file(s) into independent blocks of specified size */ -void BMK_setNotificationLevel(unsigned level); /* Influence verbosity level */ -void BMK_setBenchSeparately(int separate); /* When providing multiple files, output one result per file */ -void BMK_setDecodeOnlyMode(int set); /* v1.9.4+: set benchmark mode to decode only */ -void BMK_skipChecksums(int skip); /* v1.9.4+: only useful for DecodeOnlyMode; do not calculate checksum when present, to save CPU time */ - -void BMK_setAdditionalParam(int additionalParam); /* hidden param, influence output format, for python parsing */ - -#endif /* BENCH_H_125623623633 */ diff --git a/librocksdb-sys/lz4/programs/datagen.c b/librocksdb-sys/lz4/programs/datagen.c deleted file mode 100644 index f448640..0000000 --- a/librocksdb-sys/lz4/programs/datagen.c +++ /dev/null @@ -1,189 +0,0 @@ -/* - datagen.c - compressible data generator test tool - Copyright (C) Yann Collet 2012-2020 - - GPL v2 License - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - Public forum : https://groups.google.com/forum/#!forum/lz4c -*/ - -/************************************** -* Includes -**************************************/ -#include "platform.h" /* Compiler options, SET_BINARY_MODE */ -#include "util.h" /* U32 */ -#include /* malloc */ -#include /* FILE, fwrite */ -#include /* memcpy */ -#include - - -/************************************** -* Constants -**************************************/ -#define KB *(1 <<10) - -#define PRIME1 2654435761U -#define PRIME2 2246822519U - - -/************************************** -* Local types -**************************************/ -#define LTLOG 13 -#define LTSIZE (1<> (32 - r))) -static unsigned int RDG_rand(U32* src) -{ - U32 rand32 = *src; - rand32 *= PRIME1; - rand32 ^= PRIME2; - rand32 = RDG_rotl32(rand32, 13); - *src = rand32; - return rand32; -} - - -static void RDG_fillLiteralDistrib(litDistribTable lt, double ld) -{ - BYTE const firstChar = ld <= 0.0 ? 0 : '('; - BYTE const lastChar = ld <= 0.0 ? 255 : '}'; - BYTE character = ld <= 0.0 ? 0 : '0'; - U32 u = 0; - - while (u lastChar) character = firstChar; - } -} - - -static BYTE RDG_genChar(U32* seed, const litDistribTable lt) -{ - U32 id = RDG_rand(seed) & LTMASK; - return (lt[id]); -} - - -#define RDG_DICTSIZE (32 KB) -#define RDG_RAND15BITS ((RDG_rand(seed) >> 3) & 32767) -#define RDG_RANDLENGTH ( ((RDG_rand(seed) >> 7) & 7) ? (RDG_rand(seed) & 15) : (RDG_rand(seed) & 511) + 15) -void RDG_genBlock(void* buffer, size_t buffSize, size_t prefixSize, double matchProba, litDistribTable lt, unsigned* seedPtr) -{ - BYTE* buffPtr = (BYTE*)buffer; - const U32 matchProba32 = (U32)(32768 * matchProba); - size_t pos = prefixSize; - U32* seed = seedPtr; - - /* special case */ - while (matchProba >= 1.0) { - size_t size0 = RDG_rand(seed) & 3; - size0 = (size_t)1 << (16 + size0 * 2); - size0 += RDG_rand(seed) & (size0-1); /* because size0 is power of 2*/ - if (buffSize < pos + size0) { - memset(buffPtr+pos, 0, buffSize-pos); - return; - } - memset(buffPtr+pos, 0, size0); - pos += size0; - buffPtr[pos-1] = RDG_genChar(seed, lt); - } - - /* init */ - if (pos==0) { - buffPtr[0] = RDG_genChar(seed, lt); - pos=1; - } - - /* Generate compressible data */ - while (pos < buffSize) { - /* Select : Literal (char) or Match (within 32K) */ - if (RDG_RAND15BITS < matchProba32) { - /* Copy (within 32K) */ - size_t match; - size_t d; - int length = RDG_RANDLENGTH + 4; - U32 offset = RDG_RAND15BITS + 1; - if (offset > pos) offset = (U32)pos; - match = pos - offset; - d = pos + length; - if (d > buffSize) d = buffSize; - while (pos < d) buffPtr[pos++] = buffPtr[match++]; - } else { - /* Literal (noise) */ - size_t d; - size_t length = RDG_RANDLENGTH; - d = pos + length; - if (d > buffSize) d = buffSize; - while (pos < d) buffPtr[pos++] = RDG_genChar(seed, lt); - } - } -} - - -void RDG_genBuffer(void* buffer, size_t size, double matchProba, double litProba, unsigned seed) -{ - litDistribTable lt; - if (litProba==0.0) litProba = matchProba / 4.5; - RDG_fillLiteralDistrib(lt, litProba); - RDG_genBlock(buffer, size, 0, matchProba, lt, &seed); -} - - -#define RDG_BLOCKSIZE (128 KB) -void RDG_genOut(unsigned long long size, double matchProba, double litProba, unsigned seed) -{ - BYTE 
buff[RDG_DICTSIZE + RDG_BLOCKSIZE]; - U64 total = 0; - size_t genBlockSize = RDG_BLOCKSIZE; - litDistribTable lt; - - /* init */ - if (litProba==0.0) litProba = matchProba / 4.5; - RDG_fillLiteralDistrib(lt, litProba); - SET_BINARY_MODE(stdout); - - /* Generate dict */ - RDG_genBlock(buff, RDG_DICTSIZE, 0, matchProba, lt, &seed); - - /* Generate compressible data */ - while (total < size) { - RDG_genBlock(buff, RDG_DICTSIZE+RDG_BLOCKSIZE, RDG_DICTSIZE, matchProba, lt, &seed); - if (size-total < RDG_BLOCKSIZE) genBlockSize = (size_t)(size-total); - total += genBlockSize; - fwrite(buff, 1, genBlockSize, stdout); /* should check potential write error */ - /* update dict */ - memcpy(buff, buff + RDG_BLOCKSIZE, RDG_DICTSIZE); - } -} diff --git a/librocksdb-sys/lz4/programs/datagen.h b/librocksdb-sys/lz4/programs/datagen.h deleted file mode 100644 index c20c9c7..0000000 --- a/librocksdb-sys/lz4/programs/datagen.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - datagen.h - compressible data generator header - Copyright (C) Yann Collet 2012-2020 - - GPL v2 License - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - Public forum : https://groups.google.com/forum/#!forum/lz4c -*/ - - -#include /* size_t */ - -void RDG_genOut(unsigned long long size, double matchProba, double litProba, unsigned seed); -void RDG_genBuffer(void* buffer, size_t size, double matchProba, double litProba, unsigned seed); -/* RDG_genOut - Generate 'size' bytes of compressible data into stdout. - Compressibility can be controlled using 'matchProba'. - 'LitProba' is optional, and affect variability of bytes. If litProba==0.0, default value is used. - Generated data can be selected using 'seed'. - If (matchProba, litProba and seed) are equal, the function always generate the same content. 
- - RDG_genBuffer - Same as RDG_genOut, but generate data into provided buffer -*/ diff --git a/librocksdb-sys/lz4/programs/lz4-exe.rc.in b/librocksdb-sys/lz4/programs/lz4-exe.rc.in deleted file mode 100644 index bcf4d7d..0000000 --- a/librocksdb-sys/lz4/programs/lz4-exe.rc.in +++ /dev/null @@ -1,26 +0,0 @@ -1 VERSIONINFO -FILEVERSION @LIBVER_MAJOR@,@LIBVER_MINOR@,@LIBVER_PATCH@,0 -PRODUCTVERSION @LIBVER_MAJOR@,@LIBVER_MINOR@,@LIBVER_PATCH@,0 -FILEFLAGSMASK 0 -FILEOS 0x40000 -FILETYPE 1 -{ - BLOCK "StringFileInfo" - { - BLOCK "040904B0" - { - VALUE "CompanyName", "Yann Collet" - VALUE "FileDescription", "Extremely fast compression" - VALUE "FileVersion", "@LIBVER_MAJOR@.@LIBVER_MINOR@.@LIBVER_PATCH@.0" - VALUE "InternalName", "@PROGNAME@" - VALUE "LegalCopyright", "Copyright (C) 2013-2020, Yann Collet" - VALUE "OriginalFilename", "@PROGNAME@.@EXT@" - VALUE "ProductName", "LZ4" - VALUE "ProductVersion", "@LIBVER_MAJOR@.@LIBVER_MINOR@.@LIBVER_PATCH@.0" - } - } - BLOCK "VarFileInfo" - { - VALUE "Translation", 0x0409, 1200 - } -} diff --git a/librocksdb-sys/lz4/programs/lz4.1 b/librocksdb-sys/lz4/programs/lz4.1 deleted file mode 100644 index 7cb98d6..0000000 --- a/librocksdb-sys/lz4/programs/lz4.1 +++ /dev/null @@ -1,249 +0,0 @@ -. -.TH "LZ4" "1" "August 2022" "lz4 v1.9.4" "User Commands" -. -.SH "NAME" -\fBlz4\fR \- lz4, unlz4, lz4cat \- Compress or decompress \.lz4 files -. -.SH "SYNOPSIS" -\fBlz4\fR [\fIOPTIONS\fR] [\-|INPUT\-FILE] \fIOUTPUT\-FILE\fR -. -.P -\fBunlz4\fR is equivalent to \fBlz4 \-d\fR -. -.P -\fBlz4cat\fR is equivalent to \fBlz4 \-dcfm\fR -. -.P -When writing scripts that need to decompress files, it is recommended to always use the name \fBlz4\fR with appropriate arguments (\fBlz4 \-d\fR or \fBlz4 \-dc\fR) instead of the names \fBunlz4\fR and \fBlz4cat\fR\. -. -.SH "DESCRIPTION" -\fBlz4\fR is an extremely fast lossless compression algorithm, based on \fBbyte\-aligned LZ77\fR family of compression scheme\. \fBlz4\fR offers compression speeds > 500 MB/s per core, linearly scalable with multi\-core CPUs\. It features an extremely fast decoder, offering speed in multiple GB/s per core, typically reaching RAM speed limit on multi\-core systems\. The native file format is the \fB\.lz4\fR format\. -. -.SS "Difference between lz4 and gzip" -\fBlz4\fR supports a command line syntax similar \fIbut not identical\fR to \fBgzip(1)\fR\. Differences are : -. -.IP "\(bu" 4 -\fBlz4\fR compresses a single file by default (see \fB\-m\fR for multiple files) -. -.IP "\(bu" 4 -\fBlz4 file1 file2\fR means : compress file1 \fIinto\fR file2 -. -.IP "\(bu" 4 -\fBlz4 file\.lz4\fR will default to decompression (use \fB\-z\fR to force compression) -. -.IP "\(bu" 4 -\fBlz4\fR preserves original files (see \fB\-\-rm\fR to erase source file on completion) -. -.IP "\(bu" 4 -\fBlz4\fR shows real\-time notification statistics during compression or decompression of a single file (use \fB\-q\fR to silence them) -. -.IP "\(bu" 4 -When no destination is specified, result is sent on implicit output, which depends on \fBstdout\fR status\. When \fBstdout\fR \fIis Not the console\fR, it becomes the implicit output\. Otherwise, if \fBstdout\fR is the console, the implicit output is \fBfilename\.lz4\fR\. -. -.IP "\(bu" 4 -It is considered bad practice to rely on implicit output in scripts\. because the script\'s environment may change\. Always use explicit output in scripts\. \fB\-c\fR ensures that output will be \fBstdout\fR\. 
Conversely, providing a destination name, or using \fB\-m\fR ensures that the output will be either the specified name, or \fBfilename\.lz4\fR respectively\. -. -.IP "" 0 -. -.P -Default behaviors can be modified by opt\-in commands, detailed below\. -. -.IP "\(bu" 4 -\fBlz4 \-m\fR makes it possible to provide multiple input filenames, which will be compressed into files using suffix \fB\.lz4\fR\. Progress notifications become disabled by default (use \fB\-v\fR to enable them)\. This mode has a behavior which more closely mimics \fBgzip\fR command line, with the main remaining difference being that source files are preserved by default\. -. -.IP "\(bu" 4 -Similarly, \fBlz4 \-m \-d\fR can decompress multiple \fB*\.lz4\fR files\. -. -.IP "\(bu" 4 -It\'s possible to opt\-in to erase source files on successful compression or decompression, using \fB\-\-rm\fR command\. -. -.IP "\(bu" 4 -Consequently, \fBlz4 \-m \-\-rm\fR behaves the same as \fBgzip\fR\. -. -.IP "" 0 -. -.SS "Concatenation of \.lz4 files" -It is possible to concatenate \fB\.lz4\fR files as is\. \fBlz4\fR will decompress such files as if they were a single \fB\.lz4\fR file\. For example: -. -.IP "" 4 -. -.nf - -lz4 file1 > foo\.lz4 -lz4 file2 >> foo\.lz4 -. -.fi -. -.IP "" 0 -. -.P -Then \fBlz4cat foo\.lz4\fR is equivalent to \fBcat file1 file2\fR\. -. -.SH "OPTIONS" -. -.SS "Short commands concatenation" -In some cases, some options can be expressed using short command \fB\-x\fR or long command \fB\-\-long\-word\fR\. Short commands can be concatenated together\. For example, \fB\-d \-c\fR is equivalent to \fB\-dc\fR\. Long commands cannot be concatenated\. They must be clearly separated by a space\. -. -.SS "Multiple commands" -When multiple contradictory commands are issued on a same command line, only the latest one will be applied\. -. -.SS "Operation mode" -. -.TP -\fB\-z\fR \fB\-\-compress\fR -Compress\. This is the default operation mode when no operation mode option is specified, no other operation mode is implied from the command name (for example, \fBunlz4\fR implies \fB\-\-decompress\fR), nor from the input file name (for example, a file extension \fB\.lz4\fR implies \fB\-\-decompress\fR by default)\. \fB\-z\fR can also be used to force compression of an already compressed \fB\.lz4\fR file\. -. -.TP -\fB\-d\fR \fB\-\-decompress\fR \fB\-\-uncompress\fR -Decompress\. \fB\-\-decompress\fR is also the default operation when the input filename has an \fB\.lz4\fR extension\. -. -.TP -\fB\-t\fR \fB\-\-test\fR -Test the integrity of compressed \fB\.lz4\fR files\. The decompressed data is discarded\. No files are created nor removed\. -. -.TP -\fB\-b#\fR -Benchmark mode, using \fB#\fR compression level\. -. -.TP -\fB\-\-list\fR -List information about \.lz4 files\. note : current implementation is limited to single\-frame \.lz4 files\. -. -.SS "Operation modifiers" -. -.TP -\fB\-#\fR -Compression level, with # being any value from 1 to 12\. Higher values trade compression speed for compression ratio\. Values above 12 are considered the same as 12\. Recommended values are 1 for fast compression (default), and 9 for high compression\. Speed/compression trade\-off will vary depending on data to compress\. Decompression speed remains fast at all settings\. -. -.TP -\fB\-\-fast[=#]\fR -Switch to ultra\-fast compression levels\. The higher the value, the faster the compression speed, at the cost of some compression ratio\. If \fB=#\fR is not present, it defaults to \fB1\fR\. 
This setting overrides compression level if one was set previously\. Similarly, if a compression level is set after \fB\-\-fast\fR, it overrides it\. -. -.TP -\fB\-\-best\fR -Set highest compression level\. Same as \-12\. -. -.TP -\fB\-\-favor\-decSpeed\fR -Generate compressed data optimized for decompression speed\. Compressed data will be larger as a consequence (typically by ~0\.5%), while decompression speed will be improved by 5\-20%, depending on use cases\. This option only works in combination with very high compression levels (>=10)\. -. -.TP -\fB\-D dictionaryName\fR -Compress, decompress or benchmark using dictionary \fIdictionaryName\fR\. Compression and decompression must use the same dictionary to be compatible\. Using a different dictionary during decompression will either abort due to decompression error, or generate a checksum error\. -. -.TP -\fB\-f\fR \fB\-\-[no\-]force\fR -This option has several effects: -. -.IP -If the target file already exists, overwrite it without prompting\. -. -.IP -When used with \fB\-\-decompress\fR and \fBlz4\fR cannot recognize the type of the source file, copy the source file as is to standard output\. This allows \fBlz4cat \-\-force\fR to be used like \fBcat (1)\fR for files that have not been compressed with \fBlz4\fR\. -. -.TP -\fB\-c\fR \fB\-\-stdout\fR \fB\-\-to\-stdout\fR -Force write to standard output, even if it is the console\. -. -.TP -\fB\-m\fR \fB\-\-multiple\fR -Multiple input files\. Compressed file names will be appended a \fB\.lz4\fR suffix\. This mode also reduces notification level\. Can also be used to list multiple files\. \fBlz4 \-m\fR has a behavior equivalent to \fBgzip \-k\fR (it preserves source files by default)\. -. -.TP -\fB\-r\fR -operate recursively on directories\. This mode also sets \fB\-m\fR (multiple input files)\. -. -.TP -\fB\-B#\fR -Block size [4\-7](default : 7) -. -.br -\fB\-B4\fR= 64KB ; \fB\-B5\fR= 256KB ; \fB\-B6\fR= 1MB ; \fB\-B7\fR= 4MB -. -.TP -\fB\-BI\fR -Produce independent blocks (default) -. -.TP -\fB\-BD\fR -Blocks depend on predecessors (improves compression ratio, more noticeable on small blocks) -. -.TP -\fB\-BX\fR -Generate block checksums (default:disabled) -. -.TP -\fB\-\-[no\-]frame\-crc\fR -Select frame checksum (default:enabled) -. -.TP -\fB\-\-no\-crc\fR -Disable both frame and block checksums -. -.TP -\fB\-\-[no\-]content\-size\fR -Header includes original size (default:not present) -. -.br -Note : this option can only be activated when the original size can be determined, hence for a file\. It won\'t work with unknown source size, such as stdin or pipe\. -. -.TP -\fB\-\-[no\-]sparse\fR -Sparse mode support (default:enabled on file, disabled on stdout) -. -.TP -\fB\-l\fR -Use Legacy format (typically for Linux Kernel compression) -. -.br -Note : \fB\-l\fR is not compatible with \fB\-m\fR (\fB\-\-multiple\fR) nor \fB\-r\fR -. -.SS "Other options" -. -.TP -\fB\-v\fR \fB\-\-verbose\fR -Verbose mode -. -.TP -\fB\-q\fR \fB\-\-quiet\fR -Suppress warnings and real\-time statistics; specify twice to suppress errors too -. -.TP -\fB\-h\fR \fB\-H\fR \fB\-\-help\fR -Display help/long help and exit -. -.TP -\fB\-V\fR \fB\-\-version\fR -Display Version number and exit -. -.TP -\fB\-k\fR \fB\-\-keep\fR -Preserve source files (default behavior) -. -.TP -\fB\-\-rm\fR -Delete source files on successful compression or decompression -. -.TP -\fB\-\-\fR -Treat all subsequent arguments as files -. -.SS "Benchmark mode" -. -.TP -\fB\-b#\fR -Benchmark file(s), using # compression level -. 
-.TP -\fB\-e#\fR -Benchmark multiple compression levels, from b# to e# (included) -. -.TP -\fB\-i#\fR -Minimum evaluation time in seconds [1\-9] (default : 3) -. -.SH "BUGS" -Report bugs at: https://github\.com/lz4/lz4/issues -. -.SH "AUTHOR" -Yann Collet diff --git a/librocksdb-sys/lz4/programs/lz4.1.md b/librocksdb-sys/lz4/programs/lz4.1.md deleted file mode 100644 index 06c06cf..0000000 --- a/librocksdb-sys/lz4/programs/lz4.1.md +++ /dev/null @@ -1,256 +0,0 @@ -lz4(1) -- lz4, unlz4, lz4cat - Compress or decompress .lz4 files -================================================================ - -SYNOPSIS --------- - -`lz4` [*OPTIONS*] [-|INPUT-FILE] - -`unlz4` is equivalent to `lz4 -d` - -`lz4cat` is equivalent to `lz4 -dcfm` - -When writing scripts that need to decompress files, -it is recommended to always use the name `lz4` with appropriate arguments -(`lz4 -d` or `lz4 -dc`) instead of the names `unlz4` and `lz4cat`. - - -DESCRIPTION ------------ - -`lz4` is an extremely fast lossless compression algorithm, -based on **byte-aligned LZ77** family of compression scheme. -`lz4` offers compression speeds > 500 MB/s per core, -linearly scalable with multi-core CPUs. -It features an extremely fast decoder, offering speed in multiple GB/s per core, -typically reaching RAM speed limit on multi-core systems. -The native file format is the `.lz4` format. - -### Difference between lz4 and gzip - -`lz4` supports a command line syntax similar _but not identical_ to `gzip(1)`. -Differences are : - - * `lz4` compresses a single file by default (see `-m` for multiple files) - * `lz4 file1 file2` means : compress file1 _into_ file2 - * `lz4 file.lz4` will default to decompression (use `-z` to force compression) - * `lz4` preserves original files (see `--rm` to erase source file on completion) - * `lz4` shows real-time notification statistics - during compression or decompression of a single file - (use `-q` to silence them) - * When no destination is specified, result is sent on implicit output, - which depends on `stdout` status. - When `stdout` _is Not the console_, it becomes the implicit output. - Otherwise, if `stdout` is the console, the implicit output is `filename.lz4`. - * It is considered bad practice to rely on implicit output in scripts. - because the script's environment may change. - Always use explicit output in scripts. - `-c` ensures that output will be `stdout`. - Conversely, providing a destination name, or using `-m` - ensures that the output will be either the specified name, or `filename.lz4` respectively. - -Default behaviors can be modified by opt-in commands, detailed below. - - * `lz4 -m` makes it possible to provide multiple input filenames, - which will be compressed into files using suffix `.lz4`. - Progress notifications become disabled by default (use `-v` to enable them). - This mode has a behavior which more closely mimics `gzip` command line, - with the main remaining difference being that source files are preserved by default. - * Similarly, `lz4 -m -d` can decompress multiple `*.lz4` files. - * It's possible to opt-in to erase source files - on successful compression or decompression, using `--rm` command. - * Consequently, `lz4 -m --rm` behaves the same as `gzip`. - -### Concatenation of .lz4 files - -It is possible to concatenate `.lz4` files as is. -`lz4` will decompress such files as if they were a single `.lz4` file. -For example: - - lz4 file1 > foo.lz4 - lz4 file2 >> foo.lz4 - -Then `lz4cat foo.lz4` is equivalent to `cat file1 file2`. 
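As an aside on the frame-concatenation property described just above: concatenated `.lz4` files are simply LZ4 frames stored back to back, so they can also be decoded programmatically with the public lz4frame API (`LZ4F_createDecompressionContext` / `LZ4F_decompress`, the same API the removed lz4io.c links against). The following is only a minimal sketch, not code from the removed sources: the helper name `decode_concatenated`, the buffer sizes, and the stdin/stdout plumbing are illustrative choices.

    /* Minimal sketch: decode a stream made of one or more concatenated LZ4
     * frames, writing the decoded bytes to `out`. LZ4F_decompress() returns 0
     * once a frame is fully decoded; the same context can then read the next
     * frame, so the loop simply continues until the input is exhausted.
     * Buffer sizes are arbitrary illustration values. */
    #include <stdio.h>
    #include "lz4frame.h"

    static int decode_concatenated(FILE* in, FILE* out)
    {
        char srcBuf[64 * 1024];
        char dstBuf[256 * 1024];
        LZ4F_dctx* dctx = NULL;
        if (LZ4F_isError(LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION)))
            return 1;

        for (;;) {
            size_t const readSize = fread(srcBuf, 1, sizeof(srcBuf), in);
            size_t consumed = 0;
            if (readSize == 0) break;   /* end of input (or read error) */
            while (consumed < readSize) {
                size_t dstSize = sizeof(dstBuf);        /* in: capacity, out: bytes produced */
                size_t srcSize = readSize - consumed;   /* in: available, out: bytes consumed */
                size_t const ret = LZ4F_decompress(dctx, dstBuf, &dstSize,
                                                   srcBuf + consumed, &srcSize, NULL);
                if (LZ4F_isError(ret)) {
                    LZ4F_freeDecompressionContext(dctx);
                    return 1;
                }
                if (dstSize) fwrite(dstBuf, 1, dstSize, out);
                consumed += srcSize;    /* a return of 0 marks a frame boundary; keep going */
            }
        }
        LZ4F_freeDecompressionContext(dctx);
        return 0;
    }

    int main(void)
    {
        /* Assumes binary-safe stdin/stdout (POSIX); a real tool would set
         * binary mode explicitly on Windows, as the removed CLI does. */
        return decode_concatenated(stdin, stdout);
    }

Used this way, piping `foo.lz4` (built by appending two compressed files) through the program should yield the same bytes as `cat file1 file2`, matching the behaviour the man page attributes to `lz4cat`.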
- -OPTIONS -------- - -### Short commands concatenation - -In some cases, some options can be expressed using short command `-x` -or long command `--long-word`. -Short commands can be concatenated together. -For example, `-d -c` is equivalent to `-dc`. -Long commands cannot be concatenated. They must be clearly separated by a space. - -### Multiple commands - -When multiple contradictory commands are issued on a same command line, -only the latest one will be applied. - -### Operation mode - -* `-z` `--compress`: - Compress. - This is the default operation mode when no operation mode option is - specified, no other operation mode is implied from the command name - (for example, `unlz4` implies `--decompress`), - nor from the input file name - (for example, a file extension `.lz4` implies `--decompress` by default). - `-z` can also be used to force compression of an already compressed - `.lz4` file. - -* `-d` `--decompress` `--uncompress`: - Decompress. - `--decompress` is also the default operation when the input filename has an - `.lz4` extension. - -* `-t` `--test`: - Test the integrity of compressed `.lz4` files. - The decompressed data is discarded. - No files are created nor removed. - -* `-b#`: - Benchmark mode, using `#` compression level. - -* `--list`: - List information about .lz4 files. - note : current implementation is limited to single-frame .lz4 files. - -### Operation modifiers - -* `-#`: - Compression level, with # being any value from 1 to 12. - Higher values trade compression speed for compression ratio. - Values above 12 are considered the same as 12. - Recommended values are 1 for fast compression (default), - and 9 for high compression. - Speed/compression trade-off will vary depending on data to compress. - Decompression speed remains fast at all settings. - -* `--fast[=#]`: - Switch to ultra-fast compression levels. - The higher the value, the faster the compression speed, at the cost of some compression ratio. - If `=#` is not present, it defaults to `1`. - This setting overrides compression level if one was set previously. - Similarly, if a compression level is set after `--fast`, it overrides it. - -* `--best`: - Set highest compression level. Same as -12. - -* `--favor-decSpeed`: - Generate compressed data optimized for decompression speed. - Compressed data will be larger as a consequence (typically by ~0.5%), - while decompression speed will be improved by 5-20%, depending on use cases. - This option only works in combination with very high compression levels (>=10). - -* `-D dictionaryName`: - Compress, decompress or benchmark using dictionary _dictionaryName_. - Compression and decompression must use the same dictionary to be compatible. - Using a different dictionary during decompression will either - abort due to decompression error, or generate a checksum error. - -* `-f` `--[no-]force`: - This option has several effects: - - If the target file already exists, overwrite it without prompting. - - When used with `--decompress` and `lz4` cannot recognize the type of - the source file, copy the source file as is to standard output. - This allows `lz4cat --force` to be used like `cat (1)` for files - that have not been compressed with `lz4`. - -* `-c` `--stdout` `--to-stdout`: - Force write to standard output, even if it is the console. - -* `-m` `--multiple`: - Multiple input files. - Compressed file names will be appended a `.lz4` suffix. - This mode also reduces notification level. - Can also be used to list multiple files. 
- `lz4 -m` has a behavior equivalent to `gzip -k` - (it preserves source files by default). - -* `-r` : - operate recursively on directories. - This mode also sets `-m` (multiple input files). - -* `-B#`: - Block size \[4-7\](default : 7)
- `-B4`= 64KB ; `-B5`= 256KB ; `-B6`= 1MB ; `-B7`= 4MB - -* `-BI`: - Produce independent blocks (default) - -* `-BD`: - Blocks depend on predecessors (improves compression ratio, more noticeable on small blocks) - -* `-BX`: - Generate block checksums (default:disabled) - -* `--[no-]frame-crc`: - Select frame checksum (default:enabled) - -* `--no-crc`: - Disable both frame and block checksums - -* `--[no-]content-size`: - Header includes original size (default:not present)
- Note : this option can only be activated when the original size can be - determined, hence for a file. It won't work with unknown source size, - such as stdin or pipe. - -* `--[no-]sparse`: - Sparse mode support (default:enabled on file, disabled on stdout) - -* `-l`: - Use Legacy format (typically for Linux Kernel compression)
- Note : `-l` is not compatible with `-m` (`--multiple`) nor `-r` - -### Other options - -* `-v` `--verbose`: - Verbose mode - -* `-q` `--quiet`: - Suppress warnings and real-time statistics; - specify twice to suppress errors too - -* `-h` `-H` `--help`: - Display help/long help and exit - -* `-V` `--version`: - Display Version number and exit - -* `-k` `--keep`: - Preserve source files (default behavior) - -* `--rm` : - Delete source files on successful compression or decompression - -* `--` : - Treat all subsequent arguments as files - - -### Benchmark mode - -* `-b#`: - Benchmark file(s), using # compression level - -* `-e#`: - Benchmark multiple compression levels, from b# to e# (included) - -* `-i#`: - Minimum evaluation time in seconds \[1-9\] (default : 3) - - -BUGS ----- - -Report bugs at: https://github.com/lz4/lz4/issues - - -AUTHOR ------- - -Yann Collet diff --git a/librocksdb-sys/lz4/programs/lz4cli.c b/librocksdb-sys/lz4/programs/lz4cli.c deleted file mode 100644 index 8c3f9fd..0000000 --- a/librocksdb-sys/lz4/programs/lz4cli.c +++ /dev/null @@ -1,784 +0,0 @@ -/* - LZ4cli - LZ4 Command Line Interface - Copyright (C) Yann Collet 2011-2020 - - GPL v2 License - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ -/* - Note : this is stand-alone program. - It is not part of LZ4 compression library, it is a user program of the LZ4 library. - The license of LZ4 library is BSD. - The license of xxHash library is BSD. - The license of this compression CLI program is GPLv2. 
-*/ - - -/**************************** -* Includes -*****************************/ -#include "platform.h" /* Compiler options, IS_CONSOLE */ -#include "util.h" /* UTIL_HAS_CREATEFILELIST, UTIL_createFileList */ -#include /* fprintf, getchar */ -#include /* exit, calloc, free */ -#include /* strcmp, strlen */ -#include "bench.h" /* BMK_benchFile, BMK_SetNbIterations, BMK_SetBlocksize, BMK_SetPause */ -#include "lz4io.h" /* LZ4IO_compressFilename, LZ4IO_decompressFilename, LZ4IO_compressMultipleFilenames */ -#include "lz4hc.h" /* LZ4HC_CLEVEL_MAX */ -#include "lz4.h" /* LZ4_VERSION_STRING */ - - -/***************************** -* Constants -******************************/ -#define COMPRESSOR_NAME "LZ4 command line interface" -#define AUTHOR "Yann Collet" -#define WELCOME_MESSAGE "*** %s %i-bits v%s, by %s ***\n", COMPRESSOR_NAME, (int)(sizeof(void*)*8), LZ4_versionString(), AUTHOR -#define LZ4_EXTENSION ".lz4" -#define LZ4CAT "lz4cat" -#define UNLZ4 "unlz4" -#define LZ4_LEGACY "lz4c" -static int g_lz4c_legacy_commands = 0; - -#define KB *(1U<<10) -#define MB *(1U<<20) -#define GB *(1U<<30) - -#define LZ4_BLOCKSIZEID_DEFAULT 7 - - -/*-************************************ -* Macros -***************************************/ -#define DISPLAYOUT(...) fprintf(stdout, __VA_ARGS__) -#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) -#define DISPLAYLEVEL(l, ...) if (displayLevel>=l) { DISPLAY(__VA_ARGS__); } -static unsigned displayLevel = 2; /* 0 : no display ; 1: errors only ; 2 : downgradable normal ; 3 : non-downgradable normal; 4 : + information */ - - -/*-************************************ -* Exceptions -***************************************/ -#define DEBUG 0 -#define DEBUGOUTPUT(...) if (DEBUG) DISPLAY(__VA_ARGS__); -#define EXM_THROW(error, ...) \ -{ \ - DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \ - DISPLAYLEVEL(1, "Error %i : ", error); \ - DISPLAYLEVEL(1, __VA_ARGS__); \ - DISPLAYLEVEL(1, "\n"); \ - exit(error); \ -} - - -/*-************************************ -* Version modifiers -***************************************/ -#define DEFAULT_COMPRESSOR LZ4IO_compressFilename -#define DEFAULT_DECOMPRESSOR LZ4IO_decompressFilename -int LZ4IO_compressFilename_Legacy(const char* input_filename, const char* output_filename, int compressionlevel, const LZ4IO_prefs_t* prefs); /* hidden function */ -int LZ4IO_compressMultipleFilenames_Legacy( - const char** inFileNamesTable, int ifntSize, - const char* suffix, - int compressionLevel, const LZ4IO_prefs_t* prefs); - -/*-*************************** -* Functions -*****************************/ -static int usage(const char* exeName) -{ - DISPLAY( "Usage : \n"); - DISPLAY( " %s [arg] [input] [output] \n", exeName); - DISPLAY( "\n"); - DISPLAY( "input : a filename \n"); - DISPLAY( " with no FILE, or when FILE is - or %s, read standard input\n", stdinmark); - DISPLAY( "Arguments : \n"); - DISPLAY( " -1 : Fast compression (default) \n"); - DISPLAY( " -9 : High compression \n"); - DISPLAY( " -d : decompression (default for %s extension)\n", LZ4_EXTENSION); - DISPLAY( " -z : force compression \n"); - DISPLAY( " -D FILE: use FILE as dictionary \n"); - DISPLAY( " -f : overwrite output without prompting \n"); - DISPLAY( " -k : preserve source files(s) (default) \n"); - DISPLAY( "--rm : remove source file(s) after successful de/compression \n"); - DISPLAY( " -h/-H : display help/long help and exit \n"); - return 0; -} - -static int usage_advanced(const char* exeName) -{ - DISPLAY(WELCOME_MESSAGE); - usage(exeName); - DISPLAY( "\n"); 
- DISPLAY( "Advanced arguments :\n"); - DISPLAY( " -V : display Version number and exit \n"); - DISPLAY( " -v : verbose mode \n"); - DISPLAY( " -q : suppress warnings; specify twice to suppress errors too\n"); - DISPLAY( " -c : force write to standard output, even if it is the console\n"); - DISPLAY( " -t : test compressed file integrity\n"); - DISPLAY( " -m : multiple input files (implies automatic output filenames)\n"); -#ifdef UTIL_HAS_CREATEFILELIST - DISPLAY( " -r : operate recursively on directories (sets also -m) \n"); -#endif - DISPLAY( " -l : compress using Legacy format (Linux kernel compression)\n"); - DISPLAY( " -B# : cut file into blocks of size # bytes [32+] \n"); - DISPLAY( " or predefined block size [4-7] (default: 7) \n"); - DISPLAY( " -BI : Block Independence (default) \n"); - DISPLAY( " -BD : Block dependency (improves compression ratio) \n"); - DISPLAY( " -BX : enable block checksum (default:disabled) \n"); - DISPLAY( "--no-frame-crc : disable stream checksum (default:enabled) \n"); - DISPLAY( "--content-size : compressed frame includes original size (default:not present)\n"); - DISPLAY( "--list FILE : lists information about .lz4 files (useful for files compressed with --content-size flag)\n"); - DISPLAY( "--[no-]sparse : sparse mode (default:enabled on file, disabled on stdout)\n"); - DISPLAY( "--favor-decSpeed: compressed files decompress faster, but are less compressed \n"); - DISPLAY( "--fast[=#]: switch to ultra fast compression level (default: %i)\n", 1); - DISPLAY( "--best : same as -%d\n", LZ4HC_CLEVEL_MAX); - DISPLAY( "Benchmark arguments : \n"); - DISPLAY( " -b# : benchmark file(s), using # compression level (default : 1) \n"); - DISPLAY( " -e# : test all compression levels from -bX to # (default : 1)\n"); - DISPLAY( " -i# : minimum evaluation time in seconds (default : 3s) \n"); - if (g_lz4c_legacy_commands) { - DISPLAY( "Legacy arguments : \n"); - DISPLAY( " -c0 : fast compression \n"); - DISPLAY( " -c1 : high compression \n"); - DISPLAY( " -c2,-hc: very high compression \n"); - DISPLAY( " -y : overwrite output without prompting \n"); - } - return 0; -} - -static int usage_longhelp(const char* exeName) -{ - usage_advanced(exeName); - DISPLAY( "\n"); - DISPLAY( "****************************\n"); - DISPLAY( "***** Advanced comment *****\n"); - DISPLAY( "****************************\n"); - DISPLAY( "\n"); - DISPLAY( "Which values can [output] have ? \n"); - DISPLAY( "---------------------------------\n"); - DISPLAY( "[output] : a filename \n"); - DISPLAY( " '%s', or '-' for standard output (pipe mode)\n", stdoutmark); - DISPLAY( " '%s' to discard output (test mode) \n", NULL_OUTPUT); - DISPLAY( "[output] can be left empty. In this case, it receives the following value :\n"); - DISPLAY( " - if stdout is not the console, then [output] = stdout \n"); - DISPLAY( " - if stdout is console : \n"); - DISPLAY( " + for compression, output to filename%s \n", LZ4_EXTENSION); - DISPLAY( " + for decompression, output to filename without '%s'\n", LZ4_EXTENSION); - DISPLAY( " > if input filename has no '%s' extension : error \n", LZ4_EXTENSION); - DISPLAY( "\n"); - DISPLAY( "Compression levels : \n"); - DISPLAY( "---------------------\n"); - DISPLAY( "-0 ... -2 => Fast compression, all identical\n"); - DISPLAY( "-3 ... 
-%d => High compression; higher number == more compression but slower\n", LZ4HC_CLEVEL_MAX); - DISPLAY( "\n"); - DISPLAY( "stdin, stdout and the console : \n"); - DISPLAY( "--------------------------------\n"); - DISPLAY( "To protect the console from binary flooding (bad argument mistake)\n"); - DISPLAY( "%s will refuse to read from console, or write to console \n", exeName); - DISPLAY( "except if '-c' command is specified, to force output to console \n"); - DISPLAY( "\n"); - DISPLAY( "Simple example :\n"); - DISPLAY( "----------------\n"); - DISPLAY( "1 : compress 'filename' fast, using default output name 'filename.lz4'\n"); - DISPLAY( " %s filename\n", exeName); - DISPLAY( "\n"); - DISPLAY( "Short arguments can be aggregated. For example :\n"); - DISPLAY( "----------------------------------\n"); - DISPLAY( "2 : compress 'filename' in high compression mode, overwrite output if exists\n"); - DISPLAY( " %s -9 -f filename \n", exeName); - DISPLAY( " is equivalent to :\n"); - DISPLAY( " %s -9f filename \n", exeName); - DISPLAY( "\n"); - DISPLAY( "%s can be used in 'pure pipe mode'. For example :\n", exeName); - DISPLAY( "-------------------------------------\n"); - DISPLAY( "3 : compress data stream from 'generator', send result to 'consumer'\n"); - DISPLAY( " generator | %s | consumer \n", exeName); - if (g_lz4c_legacy_commands) { - DISPLAY( "\n"); - DISPLAY( "***** Warning ***** \n"); - DISPLAY( "Legacy arguments take precedence. Therefore : \n"); - DISPLAY( "--------------------------------- \n"); - DISPLAY( " %s -hc filename \n", exeName); - DISPLAY( "means 'compress filename in high compression mode' \n"); - DISPLAY( "It is not equivalent to : \n"); - DISPLAY( " %s -h -c filename \n", exeName); - DISPLAY( "which displays help text and exits \n"); - } - return 0; -} - -static int badusage(const char* exeName) -{ - DISPLAYLEVEL(1, "Incorrect parameters\n"); - if (displayLevel >= 1) usage(exeName); - exit(1); -} - - -static void waitEnter(void) -{ - DISPLAY("Press enter to continue...\n"); - (void)getchar(); -} - -static const char* lastNameFromPath(const char* path) -{ - const char* name = path; - if (strrchr(name, '/')) name = strrchr(name, '/') + 1; - if (strrchr(name, '\\')) name = strrchr(name, '\\') + 1; /* windows */ - return name; -} - -/*! exeNameMatch() : - @return : a non-zero value if exeName matches test, excluding the extension - */ -static int exeNameMatch(const char* exeName, const char* test) -{ - return !strncmp(exeName, test, strlen(test)) && - (exeName[strlen(test)] == '\0' || exeName[strlen(test)] == '.'); -} - -/*! readU32FromChar() : - * @return : unsigned integer value read from input in `char` format - * allows and interprets K, KB, KiB, M, MB and MiB suffix. - * Will also modify `*stringPtr`, advancing it to position where it stopped reading. - * Note : function result can overflow if digit string > MAX_UINT */ -static unsigned readU32FromChar(const char** stringPtr) -{ - unsigned result = 0; - while ((**stringPtr >='0') && (**stringPtr <='9')) { - result *= 10; - result += (unsigned)(**stringPtr - '0'); - (*stringPtr)++ ; - } - if ((**stringPtr=='K') || (**stringPtr=='M')) { - result <<= 10; - if (**stringPtr=='M') result <<= 10; - (*stringPtr)++ ; - if (**stringPtr=='i') (*stringPtr)++; - if (**stringPtr=='B') (*stringPtr)++; - } - return result; -} - -/** longCommandWArg() : - * check if *stringPtr is the same as longCommand. - * If yes, @return 1 and advances *stringPtr to the position which immediately follows longCommand. 
- * @return 0 and doesn't modify *stringPtr otherwise. - */ -static int longCommandWArg(const char** stringPtr, const char* longCommand) -{ - size_t const comSize = strlen(longCommand); - int const result = !strncmp(*stringPtr, longCommand, comSize); - if (result) *stringPtr += comSize; - return result; -} - -typedef enum { om_auto, om_compress, om_decompress, om_test, om_bench, om_list } operationMode_e; - -/** determineOpMode() : - * auto-determine operation mode, based on input filename extension - * @return `om_decompress` if input filename has .lz4 extension and `om_compress` otherwise. - */ -static operationMode_e determineOpMode(const char* inputFilename) -{ - size_t const inSize = strlen(inputFilename); - size_t const extSize = strlen(LZ4_EXTENSION); - size_t const extStart= (inSize > extSize) ? inSize-extSize : 0; - if (!strcmp(inputFilename+extStart, LZ4_EXTENSION)) return om_decompress; - else return om_compress; -} - -int main(int argc, const char** argv) -{ - int i, - cLevel=1, - cLevelLast=-10000, - legacy_format=0, - forceStdout=0, - forceOverwrite=0, - main_pause=0, - multiple_inputs=0, - all_arguments_are_files=0, - operationResult=0; - operationMode_e mode = om_auto; - const char* input_filename = NULL; - const char* output_filename= NULL; - const char* dictionary_filename = NULL; - char* dynNameSpace = NULL; - const char** inFileNames = (const char**)calloc((size_t)argc, sizeof(char*)); - unsigned ifnIdx=0; - LZ4IO_prefs_t* const prefs = LZ4IO_defaultPreferences(); - const char nullOutput[] = NULL_OUTPUT; - const char extension[] = LZ4_EXTENSION; - size_t blockSize = LZ4IO_setBlockSizeID(prefs, LZ4_BLOCKSIZEID_DEFAULT); - const char* const exeName = lastNameFromPath(argv[0]); - char* fileNamesBuf = NULL; -#ifdef UTIL_HAS_CREATEFILELIST - unsigned fileNamesNb, recursive=0; -#endif - - /* Init */ - if (inFileNames==NULL) { - DISPLAY("Allocation error : not enough memory \n"); - return 1; - } - inFileNames[0] = stdinmark; - LZ4IO_setOverwrite(prefs, 0); - - /* predefined behaviors, based on binary/link name */ - if (exeNameMatch(exeName, LZ4CAT)) { - mode = om_decompress; - LZ4IO_setOverwrite(prefs, 1); - LZ4IO_setPassThrough(prefs, 1); - LZ4IO_setRemoveSrcFile(prefs, 0); - forceStdout=1; - output_filename=stdoutmark; - displayLevel=1; - multiple_inputs=1; - } - if (exeNameMatch(exeName, UNLZ4)) { mode = om_decompress; } - if (exeNameMatch(exeName, LZ4_LEGACY)) { g_lz4c_legacy_commands=1; } - - /* command switches */ - for(i=1; i='0') && (*argument<='9')) { - cLevel = (int)readU32FromChar(&argument); - argument--; - continue; - } - - - switch(argument[0]) - { - /* Display help */ - case 'V': DISPLAYOUT(WELCOME_MESSAGE); goto _cleanup; /* Version */ - case 'h': usage_advanced(exeName); goto _cleanup; - case 'H': usage_longhelp(exeName); goto _cleanup; - - case 'e': - argument++; - cLevelLast = (int)readU32FromChar(&argument); - argument--; - break; - - /* Compression (default) */ - case 'z': mode = om_compress; break; - - case 'D': - if (argument[1] == '\0') { - /* path is next arg */ - if (i + 1 == argc) { - /* there is no next arg */ - badusage(exeName); - } - dictionary_filename = argv[++i]; - } else { - /* path follows immediately */ - dictionary_filename = argument + 1; - } - /* skip to end of argument so that we jump to parsing next argument */ - argument += strlen(argument) - 1; - break; - - /* Use Legacy format (ex : Linux kernel compression) */ - case 'l': legacy_format = 1; blockSize = 8 MB; break; - - /* Decoding */ - case 'd': - if (mode != om_bench) mode = 
om_decompress; - BMK_setDecodeOnlyMode(1); - break; - - /* Force stdout, even if stdout==console */ - case 'c': - forceStdout=1; - output_filename=stdoutmark; - LZ4IO_setPassThrough(prefs, 1); - break; - - /* Test integrity */ - case 't': mode = om_test; break; - - /* Overwrite */ - case 'f': forceOverwrite=1; LZ4IO_setOverwrite(prefs, 1); break; - - /* Verbose mode */ - case 'v': displayLevel++; break; - - /* Quiet mode */ - case 'q': if (displayLevel) displayLevel--; break; - - /* keep source file (default anyway, so useless) (for xz/lzma compatibility) */ - case 'k': LZ4IO_setRemoveSrcFile(prefs, 0); break; - - /* Modify Block Properties */ - case 'B': - while (argument[1]!=0) { - int exitBlockProperties=0; - switch(argument[1]) - { - case 'D': LZ4IO_setBlockMode(prefs, LZ4IO_blockLinked); argument++; break; - case 'I': LZ4IO_setBlockMode(prefs, LZ4IO_blockIndependent); argument++; break; - case 'X': LZ4IO_setBlockChecksumMode(prefs, 1); argument ++; break; /* disabled by default */ - default : - if (argument[1] < '0' || argument[1] > '9') { - exitBlockProperties=1; - break; - } else { - unsigned B; - argument++; - B = readU32FromChar(&argument); - argument--; - if (B < 4) badusage(exeName); - if (B <= 7) { - blockSize = LZ4IO_setBlockSizeID(prefs, B); - BMK_setBlockSize(blockSize); - DISPLAYLEVEL(2, "using blocks of size %u KB \n", (U32)(blockSize>>10)); - } else { - if (B < 32) badusage(exeName); - blockSize = LZ4IO_setBlockSize(prefs, B); - BMK_setBlockSize(blockSize); - if (blockSize >= 1024) { - DISPLAYLEVEL(2, "using blocks of size %u KB \n", (U32)(blockSize>>10)); - } else { - DISPLAYLEVEL(2, "using blocks of size %u bytes \n", (U32)(blockSize)); - } - } - break; - } - } - if (exitBlockProperties) break; - } - break; - - /* Benchmark */ - case 'b': mode = om_bench; multiple_inputs=1; - break; - - /* hidden command : benchmark files, but do not fuse result */ - case 'S': BMK_setBenchSeparately(1); - break; - -#ifdef UTIL_HAS_CREATEFILELIST - /* recursive */ - case 'r': recursive=1; -#endif - /* fall-through */ - /* Treat non-option args as input files. See https://code.google.com/p/lz4/issues/detail?id=151 */ - case 'm': multiple_inputs=1; - break; - - /* Modify Nb Seconds (benchmark only) */ - case 'i': - { unsigned iters; - argument++; - iters = readU32FromChar(&argument); - argument--; - BMK_setNotificationLevel(displayLevel); - BMK_setNbSeconds(iters); /* notification if displayLevel >= 3 */ - } - break; - - /* Pause at the end (hidden option) */ - case 'p': main_pause=1; break; - - /* Unrecognised command */ - default : badusage(exeName); - } - } - continue; - } - - /* Store in *inFileNames[] if -m is used. */ - if (multiple_inputs) { inFileNames[ifnIdx++] = argument; continue; } - - /* original cli logic : lz4 input output */ - /* First non-option arg is input_filename. */ - if (!input_filename) { input_filename = argument; continue; } - - /* Second non-option arg is output_filename */ - if (!output_filename) { - output_filename = argument; - if (!strcmp (output_filename, nullOutput)) output_filename = nulmark; - continue; - } - - /* 3rd+ non-option arg should not exist */ - DISPLAYLEVEL(1, "%s : %s won't be used ! Do you want multiple input files (-m) ? \n", - forceOverwrite ? 
"Warning" : "Error", - argument); - if (!forceOverwrite) exit(1); - } - - DISPLAYLEVEL(3, WELCOME_MESSAGE); -#ifdef _POSIX_C_SOURCE - DISPLAYLEVEL(4, "_POSIX_C_SOURCE defined: %ldL\n", (long) _POSIX_C_SOURCE); -#endif -#ifdef _POSIX_VERSION - DISPLAYLEVEL(4, "_POSIX_VERSION defined: %ldL\n", (long) _POSIX_VERSION); -#endif -#ifdef PLATFORM_POSIX_VERSION - DISPLAYLEVEL(4, "PLATFORM_POSIX_VERSION defined: %ldL\n", (long) PLATFORM_POSIX_VERSION); -#endif -#ifdef _FILE_OFFSET_BITS - DISPLAYLEVEL(4, "_FILE_OFFSET_BITS defined: %ldL\n", (long) _FILE_OFFSET_BITS); -#endif - if ((mode == om_compress) || (mode == om_bench)) - DISPLAYLEVEL(4, "Blocks size : %u KB\n", (U32)(blockSize>>10)); - - if (multiple_inputs) { - input_filename = inFileNames[0]; -#ifdef UTIL_HAS_CREATEFILELIST - if (recursive) { /* at this stage, filenameTable is a list of paths, which can contain both files and directories */ - const char** extendedFileList = UTIL_createFileList(inFileNames, ifnIdx, &fileNamesBuf, &fileNamesNb); - if (extendedFileList) { - unsigned u; - for (u=0; u use stdin */ - if (!input_filename) input_filename = stdinmark; - - /* Refuse to use the console as input */ - if (!strcmp(input_filename, stdinmark) && IS_CONSOLE(stdin) ) { - DISPLAYLEVEL(1, "refusing to read from a console\n"); - exit(1); - } - - if (!strcmp(input_filename, stdinmark)) { - /* if input==stdin and no output defined, stdout becomes default output */ - if (!output_filename) output_filename = stdoutmark; - } - - /* No output filename ==> try to select one automatically (when possible) */ - while ((!output_filename) && (multiple_inputs==0)) { - if (!IS_CONSOLE(stdout) && mode != om_list) { - /* Default to stdout whenever stdout is not the console. - * Note : this policy may change in the future, therefore don't rely on it ! - * To ensure `stdout` is explicitly selected, use `-c` command flag. - * Conversely, to ensure output will not become `stdout`, use `-m` command flag */ - DISPLAYLEVEL(1, "Warning : using stdout as default output. Do not rely on this behavior: use explicit `-c` instead ! 
\n"); - output_filename = stdoutmark; - break; - } - if (mode == om_auto) { /* auto-determine compression or decompression, based on file extension */ - mode = determineOpMode(input_filename); - } - if (mode == om_compress) { /* compression to file */ - size_t const l = strlen(input_filename); - dynNameSpace = (char*)calloc(1,l+5); - if (dynNameSpace==NULL) { perror(exeName); exit(1); } - strcpy(dynNameSpace, input_filename); - strcat(dynNameSpace, LZ4_EXTENSION); - output_filename = dynNameSpace; - DISPLAYLEVEL(2, "Compressed filename will be : %s \n", output_filename); - break; - } - if (mode == om_decompress) {/* decompress to file (automatic output name only works if input filename has correct format extension) */ - size_t outl; - size_t const inl = strlen(input_filename); - dynNameSpace = (char*)calloc(1,inl+1); - if (dynNameSpace==NULL) { perror(exeName); exit(1); } - strcpy(dynNameSpace, input_filename); - outl = inl; - if (inl>4) - while ((outl >= inl-4) && (input_filename[outl] == extension[outl-inl+4])) dynNameSpace[outl--]=0; - if (outl != inl-5) { DISPLAYLEVEL(1, "Cannot determine an output filename \n"); badusage(exeName); } - output_filename = dynNameSpace; - DISPLAYLEVEL(2, "Decoding file %s \n", output_filename); - } - break; - } - - if (mode == om_list) { - if (!multiple_inputs) inFileNames[ifnIdx++] = input_filename; - } else { - if (!multiple_inputs) assert(output_filename != NULL); - } - /* when multiple_inputs==1, output_filename may simply be useless, - * however, output_filename must be !NULL for next strcmp() tests */ - if (!output_filename) output_filename = "*\\dummy^!//"; - - /* Check if output is defined as console; trigger an error in this case */ - if ( !strcmp(output_filename,stdoutmark) - && mode != om_list - && IS_CONSOLE(stdout) - && !forceStdout) { - DISPLAYLEVEL(1, "refusing to write to console without -c \n"); - exit(1); - } - /* Downgrade notification level in stdout and multiple file mode */ - if (!strcmp(output_filename,stdoutmark) && (displayLevel==2)) displayLevel=1; - if ((multiple_inputs) && (displayLevel==2)) displayLevel=1; - - /* Auto-determine compression or decompression, based on file extension */ - if (mode == om_auto) { - mode = determineOpMode(input_filename); - } - - /* IO Stream/File */ - LZ4IO_setNotificationLevel((int)displayLevel); - if (ifnIdx == 0) multiple_inputs = 0; - if (mode == om_decompress) { - if (multiple_inputs) { - const char* dec_extension = LZ4_EXTENSION; - if (!strcmp(output_filename, stdoutmark)) dec_extension = stdoutmark; - if (!strcmp(output_filename, nulmark)) dec_extension = nulmark; - assert(ifnIdx < INT_MAX); - operationResult = LZ4IO_decompressMultipleFilenames(inFileNames, (int)ifnIdx, dec_extension, prefs); - } else { - operationResult = DEFAULT_DECOMPRESSOR(input_filename, output_filename, prefs); - } - } else if (mode == om_list){ - operationResult = LZ4IO_displayCompressedFilesInfo(inFileNames, ifnIdx); - } else { /* compression is default action */ - if (legacy_format) { - DISPLAYLEVEL(3, "! Generating LZ4 Legacy format (deprecated) ! \n"); - if(multiple_inputs){ - const char* const leg_extension = !strcmp(output_filename,stdoutmark) ? stdoutmark : LZ4_EXTENSION; - LZ4IO_compressMultipleFilenames_Legacy(inFileNames, (int)ifnIdx, leg_extension, cLevel, prefs); - } else { - LZ4IO_compressFilename_Legacy(input_filename, output_filename, cLevel, prefs); - } - } else { - if (multiple_inputs) { - const char* const comp_extension = !strcmp(output_filename,stdoutmark) ? 
stdoutmark : LZ4_EXTENSION; - assert(ifnIdx <= INT_MAX); - operationResult = LZ4IO_compressMultipleFilenames(inFileNames, (int)ifnIdx, comp_extension, cLevel, prefs); - } else { - operationResult = DEFAULT_COMPRESSOR(input_filename, output_filename, cLevel, prefs); - } } } - -_cleanup: - if (main_pause) waitEnter(); - free(dynNameSpace); - free(fileNamesBuf); - LZ4IO_freePreferences(prefs); - free((void*)inFileNames); - return operationResult; -} diff --git a/librocksdb-sys/lz4/programs/lz4io.c b/librocksdb-sys/lz4/programs/lz4io.c deleted file mode 100644 index 8b70b91..0000000 --- a/librocksdb-sys/lz4/programs/lz4io.c +++ /dev/null @@ -1,1752 +0,0 @@ -/* - LZ4io.c - LZ4 File/Stream Interface - Copyright (C) Yann Collet 2011-2020 - - GPL v2 License - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ -/* - Note : this is stand-alone program. - It is not part of LZ4 compression library, it is a user code of the LZ4 library. - - The license of LZ4 library is BSD. - - The license of xxHash library is BSD. - - The license of this source file is GPLv2. 
-*/ - - -/*-************************************ -* Compiler options -**************************************/ -#ifdef _MSC_VER /* Visual Studio */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -#endif -#if defined(__MINGW32__) && !defined(_POSIX_SOURCE) -# define _POSIX_SOURCE 1 /* disable %llu warnings with MinGW on Windows */ -#endif - - -/***************************** -* Includes -*****************************/ -#include "platform.h" /* Large File Support, SET_BINARY_MODE, SET_SPARSE_FILE_MODE, PLATFORM_POSIX_VERSION, __64BIT__ */ -#include "util.h" /* UTIL_getFileStat, UTIL_setFileStat */ -#include /* fprintf, fopen, fread, stdin, stdout, fflush, getchar */ -#include /* malloc, free */ -#include /* strerror, strcmp, strlen */ -#include /* clock */ -#include /* stat64 */ -#include /* stat64 */ -#include "lz4.h" /* still required for legacy format */ -#include "lz4hc.h" /* still required for legacy format */ -#define LZ4F_STATIC_LINKING_ONLY -#include "lz4frame.h" -#include "lz4io.h" - - -/***************************** -* Constants -*****************************/ -#define KB *(1 <<10) -#define MB *(1 <<20) -#define GB *(1U<<30) - -#define _1BIT 0x01 -#define _2BITS 0x03 -#define _3BITS 0x07 -#define _4BITS 0x0F -#define _8BITS 0xFF - -#define MAGICNUMBER_SIZE 4 -#define LZ4IO_MAGICNUMBER 0x184D2204 -#define LZ4IO_SKIPPABLE0 0x184D2A50 -#define LZ4IO_SKIPPABLEMASK 0xFFFFFFF0 -#define LEGACY_MAGICNUMBER 0x184C2102 - -#define CACHELINE 64 -#define LEGACY_BLOCKSIZE (8 MB) -#define MIN_STREAM_BUFSIZE (192 KB) -#define LZ4IO_BLOCKSIZEID_DEFAULT 7 -#define LZ4_MAX_DICT_SIZE (64 KB) - - -/************************************** -* Macros -**************************************/ -#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) -#define DISPLAYOUT(...) fprintf(stdout, __VA_ARGS__) -#define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); } -static int g_displayLevel = 0; /* 0 : no display ; 1: errors ; 2 : + result + interaction + warnings ; 3 : + progression; 4 : + information */ - -#define DISPLAYUPDATE(l, ...) if (g_displayLevel>=l) { \ - if ( ((clock() - g_time) > refreshRate) \ - || (g_displayLevel>=4) ) { \ - g_time = clock(); \ - DISPLAY(__VA_ARGS__); \ - if (g_displayLevel>=4) fflush(stderr); \ - } } -static const clock_t refreshRate = CLOCKS_PER_SEC / 6; -static clock_t g_time = 0; - -#define LZ4IO_STATIC_ASSERT(c) { enum { LZ4IO_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */ - - -/************************************** -* Exceptions -***************************************/ -#ifndef DEBUG -# define DEBUG 0 -#endif -#define DEBUGOUTPUT(...) if (DEBUG) DISPLAY(__VA_ARGS__); -#define END_PROCESS(error, ...) 
\ -{ \ - DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \ - DISPLAYLEVEL(1, "Error %i : ", error); \ - DISPLAYLEVEL(1, __VA_ARGS__); \ - DISPLAYLEVEL(1, " \n"); \ - exit(error); \ -} - - -/* ************************************************** */ -/* ****************** Parameters ******************** */ -/* ************************************************** */ - -struct LZ4IO_prefs_s { - int passThrough; - int overwrite; - int testMode; - int blockSizeId; - size_t blockSize; - int blockChecksum; - int streamChecksum; - int blockIndependence; - int sparseFileSupport; - int contentSizeFlag; - int useDictionary; - unsigned favorDecSpeed; - const char* dictionaryFilename; - int removeSrcFile; -}; - -LZ4IO_prefs_t* LZ4IO_defaultPreferences(void) -{ - LZ4IO_prefs_t* const ret = (LZ4IO_prefs_t*)malloc(sizeof(*ret)); - if (!ret) END_PROCESS(21, "Allocation error : not enough memory"); - ret->passThrough = 0; - ret->overwrite = 1; - ret->testMode = 0; - ret->blockSizeId = LZ4IO_BLOCKSIZEID_DEFAULT; - ret->blockSize = 0; - ret->blockChecksum = 0; - ret->streamChecksum = 1; - ret->blockIndependence = 1; - ret->sparseFileSupport = 1; - ret->contentSizeFlag = 0; - ret->useDictionary = 0; - ret->favorDecSpeed = 0; - ret->dictionaryFilename = NULL; - ret->removeSrcFile = 0; - return ret; -} - -void LZ4IO_freePreferences(LZ4IO_prefs_t* prefs) -{ - free(prefs); -} - - -int LZ4IO_setDictionaryFilename(LZ4IO_prefs_t* const prefs, const char* dictionaryFilename) -{ - prefs->dictionaryFilename = dictionaryFilename; - prefs->useDictionary = dictionaryFilename != NULL; - return prefs->useDictionary; -} - -/* Default setting : passThrough = 0; return : passThrough mode (0/1) */ -int LZ4IO_setPassThrough(LZ4IO_prefs_t* const prefs, int yes) -{ - prefs->passThrough = (yes!=0); - return prefs->passThrough; -} - - -/* Default setting : overwrite = 1; return : overwrite mode (0/1) */ -int LZ4IO_setOverwrite(LZ4IO_prefs_t* const prefs, int yes) -{ - prefs->overwrite = (yes!=0); - return prefs->overwrite; -} - -/* Default setting : testMode = 0; return : testMode (0/1) */ -int LZ4IO_setTestMode(LZ4IO_prefs_t* const prefs, int yes) -{ - prefs->testMode = (yes!=0); - return prefs->testMode; -} - -/* blockSizeID : valid values : 4-5-6-7 */ -size_t LZ4IO_setBlockSizeID(LZ4IO_prefs_t* const prefs, unsigned bsid) -{ - static const size_t blockSizeTable[] = { 64 KB, 256 KB, 1 MB, 4 MB }; - static const unsigned minBlockSizeID = 4; - static const unsigned maxBlockSizeID = 7; - if ((bsid < minBlockSizeID) || (bsid > maxBlockSizeID)) return 0; - prefs->blockSizeId = (int)bsid; - prefs->blockSize = blockSizeTable[(unsigned)prefs->blockSizeId-minBlockSizeID]; - return prefs->blockSize; -} - -size_t LZ4IO_setBlockSize(LZ4IO_prefs_t* const prefs, size_t blockSize) -{ - static const size_t minBlockSize = 32; - static const size_t maxBlockSize = 4 MB; - unsigned bsid = 0; - if (blockSize < minBlockSize) blockSize = minBlockSize; - if (blockSize > maxBlockSize) blockSize = maxBlockSize; - prefs->blockSize = blockSize; - blockSize--; - /* find which of { 64k, 256k, 1MB, 4MB } is closest to blockSize */ - while (blockSize >>= 2) - bsid++; - if (bsid < 7) bsid = 7; - prefs->blockSizeId = (int)(bsid-3); - return prefs->blockSize; -} - -/* Default setting : 1 == independent blocks */ -int LZ4IO_setBlockMode(LZ4IO_prefs_t* const prefs, LZ4IO_blockMode_t blockMode) -{ - prefs->blockIndependence = (blockMode == LZ4IO_blockIndependent); - return prefs->blockIndependence; -} - -/* Default setting : 0 == no block checksum */ 
-int LZ4IO_setBlockChecksumMode(LZ4IO_prefs_t* const prefs, int enable) -{ - prefs->blockChecksum = (enable != 0); - return prefs->blockChecksum; -} - -/* Default setting : 1 == checksum enabled */ -int LZ4IO_setStreamChecksumMode(LZ4IO_prefs_t* const prefs, int enable) -{ - prefs->streamChecksum = (enable != 0); - return prefs->streamChecksum; -} - -/* Default setting : 0 (no notification) */ -int LZ4IO_setNotificationLevel(int level) -{ - g_displayLevel = level; - return g_displayLevel; -} - -/* Default setting : 1 (auto: enabled on file, disabled on stdout) */ -int LZ4IO_setSparseFile(LZ4IO_prefs_t* const prefs, int enable) -{ - prefs->sparseFileSupport = 2*(enable!=0); /* 2==force enable */ - return prefs->sparseFileSupport; -} - -/* Default setting : 0 (disabled) */ -int LZ4IO_setContentSize(LZ4IO_prefs_t* const prefs, int enable) -{ - prefs->contentSizeFlag = (enable!=0); - return prefs->contentSizeFlag; -} - -/* Default setting : 0 (disabled) */ -void LZ4IO_favorDecSpeed(LZ4IO_prefs_t* const prefs, int favor) -{ - prefs->favorDecSpeed = (favor!=0); -} - -void LZ4IO_setRemoveSrcFile(LZ4IO_prefs_t* const prefs, unsigned flag) -{ - prefs->removeSrcFile = (flag>0); -} - - - -/* ************************************************************************ ** -** ********************** String functions ********************* ** -** ************************************************************************ */ - -static int LZ4IO_isDevNull(const char* s) -{ - return UTIL_sameString(s, nulmark); -} - -static int LZ4IO_isStdin(const char* s) -{ - return UTIL_sameString(s, stdinmark); -} - -static int LZ4IO_isStdout(const char* s) -{ - return UTIL_sameString(s, stdoutmark); -} - - -/* ************************************************************************ ** -** ********************** LZ4 File / Pipe compression ********************* ** -** ************************************************************************ */ - -static int LZ4IO_isSkippableMagicNumber(unsigned int magic) { - return (magic & LZ4IO_SKIPPABLEMASK) == LZ4IO_SKIPPABLE0; -} - - -/** LZ4IO_openSrcFile() : - * condition : `srcFileName` must be non-NULL. - * @result : FILE* to `dstFileName`, or NULL if it fails */ -static FILE* LZ4IO_openSrcFile(const char* srcFileName) -{ - FILE* f; - - if (LZ4IO_isStdin(srcFileName)) { - DISPLAYLEVEL(4,"Using stdin for input \n"); - f = stdin; - SET_BINARY_MODE(stdin); - } else { - f = fopen(srcFileName, "rb"); - if (f==NULL) DISPLAYLEVEL(1, "%s: %s \n", srcFileName, strerror(errno)); - } - - return f; -} - -/** FIO_openDstFile() : - * prefs is writable, because sparseFileSupport might be updated. - * condition : `dstFileName` must be non-NULL. 
- * @result : FILE* to `dstFileName`, or NULL if it fails */ -static FILE* LZ4IO_openDstFile(const char* dstFileName, const LZ4IO_prefs_t* const prefs) -{ - FILE* f; - assert(dstFileName != NULL); - - if (LZ4IO_isStdout(dstFileName)) { - DISPLAYLEVEL(4, "Using stdout for output \n"); - f = stdout; - SET_BINARY_MODE(stdout); - if (prefs->sparseFileSupport==1) { - DISPLAYLEVEL(4, "Sparse File Support automatically disabled on stdout ;" - " to force-enable it, add --sparse command \n"); - } - } else { - if (!prefs->overwrite && !LZ4IO_isDevNull(dstFileName)) { - /* Check if destination file already exists */ - FILE* const testf = fopen( dstFileName, "rb" ); - if (testf != NULL) { /* dest exists, prompt for overwrite authorization */ - fclose(testf); - if (g_displayLevel <= 1) { /* No interaction possible */ - DISPLAY("%s already exists; not overwritten \n", dstFileName); - return NULL; - } - DISPLAY("%s already exists; do you want to overwrite (y/N) ? ", dstFileName); - { int ch = getchar(); - if ((ch!='Y') && (ch!='y')) { - DISPLAY(" not overwritten \n"); - return NULL; - } - while ((ch!=EOF) && (ch!='\n')) ch = getchar(); /* flush rest of input line */ - } } } - f = fopen( dstFileName, "wb" ); - if (f==NULL) DISPLAYLEVEL(1, "%s: %s\n", dstFileName, strerror(errno)); - } - - /* sparse file */ - { int const sparseMode = (prefs->sparseFileSupport - (f==stdout)) > 0; - if (f && sparseMode) { SET_SPARSE_FILE_MODE(f); } - } - - return f; -} - - - -/*************************************** -* Legacy Compression -***************************************/ - -/* Size in bytes of a legacy block header in little-endian format */ -#define LZ4IO_LEGACY_BLOCK_HEADER_SIZE 4 -#define LZ4IO_LEGACY_BLOCK_SIZE_MAX (8 MB) - -/* unoptimized version; solves endianness & alignment issues */ -static void LZ4IO_writeLE32 (void* p, unsigned value32) -{ - unsigned char* const dstPtr = (unsigned char*)p; - dstPtr[0] = (unsigned char)value32; - dstPtr[1] = (unsigned char)(value32 >> 8); - dstPtr[2] = (unsigned char)(value32 >> 16); - dstPtr[3] = (unsigned char)(value32 >> 24); -} - -static int LZ4IO_LZ4_compress(const char* src, char* dst, int srcSize, int dstSize, int cLevel) -{ - (void)cLevel; - return LZ4_compress_fast(src, dst, srcSize, dstSize, 1); -} - -/* LZ4IO_compressFilename_Legacy : - * This function is intentionally "hidden" (not published in .h) - * It generates compressed streams using the old 'legacy' format */ -int LZ4IO_compressFilename_Legacy(const char* input_filename, const char* output_filename, - int compressionlevel, const LZ4IO_prefs_t* prefs) -{ - typedef int (*compress_f)(const char* src, char* dst, int srcSize, int dstSize, int cLevel); - compress_f const compressionFunction = (compressionlevel < 3) ? 
LZ4IO_LZ4_compress : LZ4_compress_HC; - unsigned long long filesize = 0; - unsigned long long compressedfilesize = MAGICNUMBER_SIZE; - char* in_buff; - char* out_buff; - const int outBuffSize = LZ4_compressBound(LEGACY_BLOCKSIZE); - FILE* const finput = LZ4IO_openSrcFile(input_filename); - FILE* foutput; - clock_t clockEnd; - - /* Init */ - clock_t const clockStart = clock(); - if (finput == NULL) - END_PROCESS(20, "%s : open file error ", input_filename); - - foutput = LZ4IO_openDstFile(output_filename, prefs); - if (foutput == NULL) { - fclose(finput); - END_PROCESS(20, "%s : open file error ", input_filename); - } - - /* Allocate Memory */ - in_buff = (char*)malloc(LEGACY_BLOCKSIZE); - out_buff = (char*)malloc((size_t)outBuffSize + 4); - if (!in_buff || !out_buff) - END_PROCESS(21, "Allocation error : not enough memory"); - - /* Write Archive Header */ - LZ4IO_writeLE32(out_buff, LEGACY_MAGICNUMBER); - if (fwrite(out_buff, 1, MAGICNUMBER_SIZE, foutput) != MAGICNUMBER_SIZE) - END_PROCESS(22, "Write error : cannot write header"); - - /* Main Loop */ - while (1) { - int outSize; - /* Read Block */ - size_t const inSize = fread(in_buff, (size_t)1, (size_t)LEGACY_BLOCKSIZE, finput); - if (inSize == 0) break; - assert(inSize <= LEGACY_BLOCKSIZE); - filesize += inSize; - - /* Compress Block */ - outSize = compressionFunction(in_buff, out_buff+4, (int)inSize, outBuffSize, compressionlevel); - assert(outSize >= 0); - compressedfilesize += (unsigned long long)outSize+4; - DISPLAYUPDATE(2, "\rRead : %i MiB ==> %.2f%% ", - (int)(filesize>>20), (double)compressedfilesize/filesize*100); - - /* Write Block */ - assert(outSize > 0); - assert(outSize < outBuffSize); - LZ4IO_writeLE32(out_buff, (unsigned)outSize); - if (fwrite(out_buff, 1, (size_t)outSize+4, foutput) != (size_t)(outSize+4)) { - END_PROCESS(24, "Write error : cannot write compressed block"); - } } - if (ferror(finput)) END_PROCESS(24, "Error while reading %s ", input_filename); - - /* Status */ - clockEnd = clock(); - clockEnd += (clockEnd==clockStart); /* avoid division by zero (speed) */ - filesize += !filesize; /* avoid division by zero (ratio) */ - DISPLAYLEVEL(2, "\r%79s\r", ""); /* blank line */ - DISPLAYLEVEL(2,"Compressed %llu bytes into %llu bytes ==> %.2f%%\n", - filesize, compressedfilesize, (double)compressedfilesize / filesize * 100); - { double const seconds = (double)(clockEnd - clockStart) / CLOCKS_PER_SEC; - DISPLAYLEVEL(4,"Done in %.2f s ==> %.2f MiB/s\n", seconds, - (double)filesize / seconds / 1024 / 1024); - } - - /* Close & Free */ - free(in_buff); - free(out_buff); - fclose(finput); - if (!LZ4IO_isStdout(output_filename)) fclose(foutput); /* do not close stdout */ - - return 0; -} - -#define FNSPACE 30 -/* LZ4IO_compressMultipleFilenames_Legacy : - * This function is intentionally "hidden" (not published in .h) - * It generates multiple compressed streams using the old 'legacy' format */ -int LZ4IO_compressMultipleFilenames_Legacy( - const char** inFileNamesTable, int ifntSize, - const char* suffix, - int compressionLevel, const LZ4IO_prefs_t* prefs) -{ - int i; - int missed_files = 0; - char* dstFileName = (char*)malloc(FNSPACE); - size_t ofnSize = FNSPACE; - const size_t suffixSize = strlen(suffix); - - if (dstFileName == NULL) return ifntSize; /* not enough memory */ - - /* loop on each file */ - for (i=0; i0); - - if (dictLen > LZ4_MAX_DICT_SIZE) { - dictLen = LZ4_MAX_DICT_SIZE; - } - - *dictSize = dictLen; - - dictStart = (circularBufSize + dictEnd - dictLen) % circularBufSize; - - if (dictStart == 0) { - /* 
We're in the simple case where the dict starts at the beginning of our circular buffer. */ - dictBuf = circularBuf; - circularBuf = NULL; - } else { - /* Otherwise, we will alloc a new buffer and copy our dict into that. */ - dictBuf = (char *)malloc(dictLen ? dictLen : 1); - if (!dictBuf) END_PROCESS(28, "Allocation error : not enough memory"); - - memcpy(dictBuf, circularBuf + dictStart, circularBufSize - dictStart); - memcpy(dictBuf + circularBufSize - dictStart, circularBuf, dictLen - (circularBufSize - dictStart)); - } - - fclose(dictFile); - free(circularBuf); - - return dictBuf; -} - -static LZ4F_CDict* LZ4IO_createCDict(const LZ4IO_prefs_t* const prefs) -{ - size_t dictionarySize; - void* dictionaryBuffer; - LZ4F_CDict* cdict; - if (!prefs->useDictionary) return NULL; - dictionaryBuffer = LZ4IO_createDict(&dictionarySize, prefs->dictionaryFilename); - if (!dictionaryBuffer) END_PROCESS(29, "Dictionary error : could not create dictionary"); - cdict = LZ4F_createCDict(dictionaryBuffer, dictionarySize); - free(dictionaryBuffer); - return cdict; -} - -static cRess_t LZ4IO_createCResources(const LZ4IO_prefs_t* const prefs) -{ - const size_t blockSize = prefs->blockSize; - cRess_t ress; - - LZ4F_errorCode_t const errorCode = LZ4F_createCompressionContext(&(ress.ctx), LZ4F_VERSION); - if (LZ4F_isError(errorCode)) END_PROCESS(30, "Allocation error : can't create LZ4F context : %s", LZ4F_getErrorName(errorCode)); - - /* Allocate Memory */ - ress.srcBuffer = malloc(blockSize); - ress.srcBufferSize = blockSize; - ress.dstBufferSize = LZ4F_compressFrameBound(blockSize, NULL); /* cover worst case */ - ress.dstBuffer = malloc(ress.dstBufferSize); - if (!ress.srcBuffer || !ress.dstBuffer) END_PROCESS(31, "Allocation error : not enough memory"); - - ress.cdict = LZ4IO_createCDict(prefs); - - return ress; -} - -static void LZ4IO_freeCResources(cRess_t ress) -{ - free(ress.srcBuffer); - free(ress.dstBuffer); - - LZ4F_freeCDict(ress.cdict); - ress.cdict = NULL; - - { LZ4F_errorCode_t const errorCode = LZ4F_freeCompressionContext(ress.ctx); - if (LZ4F_isError(errorCode)) END_PROCESS(35, "Error : can't free LZ4F context resource : %s", LZ4F_getErrorName(errorCode)); } -} - -/* - * LZ4IO_compressFilename_extRess() - * result : 0 : compression completed correctly - * 1 : missing or pb opening srcFileName - */ -static int -LZ4IO_compressFilename_extRess(cRess_t ress, - const char* srcFileName, const char* dstFileName, - int compressionLevel, const LZ4IO_prefs_t* const io_prefs) -{ - unsigned long long filesize = 0; - unsigned long long compressedfilesize = 0; - FILE* dstFile; - void* const srcBuffer = ress.srcBuffer; - void* const dstBuffer = ress.dstBuffer; - const size_t dstBufferSize = ress.dstBufferSize; - const size_t blockSize = io_prefs->blockSize; - size_t readSize; - LZ4F_compressionContext_t ctx = ress.ctx; /* just a pointer */ - LZ4F_preferences_t prefs; - - /* Init */ - FILE* const srcFile = LZ4IO_openSrcFile(srcFileName); - if (srcFile == NULL) return 1; - dstFile = LZ4IO_openDstFile(dstFileName, io_prefs); - if (dstFile == NULL) { fclose(srcFile); return 1; } - memset(&prefs, 0, sizeof(prefs)); - - /* Set compression parameters */ - prefs.autoFlush = 1; - prefs.compressionLevel = compressionLevel; - prefs.frameInfo.blockMode = (LZ4F_blockMode_t)io_prefs->blockIndependence; - prefs.frameInfo.blockSizeID = (LZ4F_blockSizeID_t)io_prefs->blockSizeId; - prefs.frameInfo.blockChecksumFlag = (LZ4F_blockChecksum_t)io_prefs->blockChecksum; - prefs.frameInfo.contentChecksumFlag = 
(LZ4F_contentChecksum_t)io_prefs->streamChecksum; - prefs.favorDecSpeed = io_prefs->favorDecSpeed; - if (io_prefs->contentSizeFlag) { - U64 const fileSize = UTIL_getOpenFileSize(srcFile); - prefs.frameInfo.contentSize = fileSize; /* == 0 if input == stdin */ - if (fileSize==0) - DISPLAYLEVEL(3, "Warning : cannot determine input content size \n"); - } - - /* read first block */ - readSize = fread(srcBuffer, (size_t)1, blockSize, srcFile); - if (ferror(srcFile)) END_PROCESS(40, "Error reading %s ", srcFileName); - filesize += readSize; - - /* single-block file */ - if (readSize < blockSize) { - /* Compress in single pass */ - size_t const cSize = LZ4F_compressFrame_usingCDict(ctx, dstBuffer, dstBufferSize, srcBuffer, readSize, ress.cdict, &prefs); - if (LZ4F_isError(cSize)) - END_PROCESS(41, "Compression failed : %s", LZ4F_getErrorName(cSize)); - compressedfilesize = cSize; - DISPLAYUPDATE(2, "\rRead : %u MiB ==> %.2f%% ", - (unsigned)(filesize>>20), (double)compressedfilesize/(filesize+!filesize)*100); /* avoid division by zero */ - - /* Write Block */ - if (fwrite(dstBuffer, 1, cSize, dstFile) != cSize) { - END_PROCESS(42, "Write error : failed writing single-block compressed frame"); - } } - - else - - /* multiple-blocks file */ - { - /* Write Frame Header */ - size_t const headerSize = LZ4F_compressBegin_usingCDict(ctx, dstBuffer, dstBufferSize, ress.cdict, &prefs); - if (LZ4F_isError(headerSize)) END_PROCESS(43, "File header generation failed : %s", LZ4F_getErrorName(headerSize)); - if (fwrite(dstBuffer, 1, headerSize, dstFile) != headerSize) - END_PROCESS(44, "Write error : cannot write header"); - compressedfilesize += headerSize; - - /* Main Loop - one block at a time */ - while (readSize>0) { - size_t const outSize = LZ4F_compressUpdate(ctx, dstBuffer, dstBufferSize, srcBuffer, readSize, NULL); - if (LZ4F_isError(outSize)) - END_PROCESS(45, "Compression failed : %s", LZ4F_getErrorName(outSize)); - compressedfilesize += outSize; - DISPLAYUPDATE(2, "\rRead : %u MiB ==> %.2f%% ", - (unsigned)(filesize>>20), (double)compressedfilesize/filesize*100); - - /* Write Block */ - if (fwrite(dstBuffer, 1, outSize, dstFile) != outSize) - END_PROCESS(46, "Write error : cannot write compressed block"); - - /* Read next block */ - readSize = fread(srcBuffer, (size_t)1, (size_t)blockSize, srcFile); - filesize += readSize; - } - if (ferror(srcFile)) END_PROCESS(47, "Error reading %s ", srcFileName); - - /* End of Frame mark */ - { size_t const endSize = LZ4F_compressEnd(ctx, dstBuffer, dstBufferSize, NULL); - if (LZ4F_isError(endSize)) - END_PROCESS(48, "End of frame error : %s", LZ4F_getErrorName(endSize)); - if (fwrite(dstBuffer, 1, endSize, dstFile) != endSize) - END_PROCESS(49, "Write error : cannot write end of frame"); - compressedfilesize += endSize; - } } - - /* Release file handlers */ - fclose (srcFile); - if (!LZ4IO_isStdout(dstFileName)) fclose(dstFile); /* do not close stdout */ - - /* Copy owner, file permissions and modification time */ - { stat_t statbuf; - if (!LZ4IO_isStdin(srcFileName) - && !LZ4IO_isStdout(dstFileName) - && !LZ4IO_isDevNull(dstFileName) - && UTIL_getFileStat(srcFileName, &statbuf)) { - UTIL_setFileStat(dstFileName, &statbuf); - } } - - if (io_prefs->removeSrcFile) { /* remove source file : --rm */ - if (remove(srcFileName)) - END_PROCESS(50, "Remove error : %s: %s", srcFileName, strerror(errno)); - } - - /* Final Status */ - DISPLAYLEVEL(2, "\r%79s\r", ""); - DISPLAYLEVEL(2, "Compressed %llu bytes into %llu bytes ==> %.2f%%\n", - filesize, compressedfilesize, - 
(double)compressedfilesize / (filesize + !filesize /* avoid division by zero */ ) * 100); - - return 0; -} - - -int LZ4IO_compressFilename(const char* srcFileName, const char* dstFileName, int compressionLevel, const LZ4IO_prefs_t* prefs) -{ - UTIL_time_t const timeStart = UTIL_getTime(); - clock_t const cpuStart = clock(); - cRess_t const ress = LZ4IO_createCResources(prefs); - - int const result = LZ4IO_compressFilename_extRess(ress, srcFileName, dstFileName, compressionLevel, prefs); - - /* Free resources */ - LZ4IO_freeCResources(ress); - - /* Final Status */ - { clock_t const cpuEnd = clock(); - double const cpuLoad_s = (double)(cpuEnd - cpuStart) / CLOCKS_PER_SEC; - U64 const timeLength_ns = UTIL_clockSpanNano(timeStart); - double const timeLength_s = (double)timeLength_ns / 1000000000; - DISPLAYLEVEL(4, "Completed in %.2f sec (cpu load : %.0f%%)\n", - timeLength_s, (cpuLoad_s / timeLength_s) * 100); - } - - return result; -} - - -int LZ4IO_compressMultipleFilenames( - const char** inFileNamesTable, int ifntSize, - const char* suffix, - int compressionLevel, - const LZ4IO_prefs_t* prefs) -{ - int i; - int missed_files = 0; - char* dstFileName = (char*)malloc(FNSPACE); - size_t ofnSize = FNSPACE; - const size_t suffixSize = strlen(suffix); - cRess_t ress; - - if (dstFileName == NULL) return ifntSize; /* not enough memory */ - ress = LZ4IO_createCResources(prefs); - - /* loop on each file */ - for (i=0; i compress into a file => generate its name */ - if (ofnSize <= ifnSize+suffixSize+1) { - free(dstFileName); - ofnSize = ifnSize + 20; - dstFileName = (char*)malloc(ofnSize); - if (dstFileName==NULL) { - LZ4IO_freeCResources(ress); - return ifntSize; - } } - strcpy(dstFileName, inFileNamesTable[i]); - strcat(dstFileName, suffix); - - missed_files += LZ4IO_compressFilename_extRess(ress, - inFileNamesTable[i], dstFileName, - compressionLevel, prefs); - } - - /* Close & Free */ - LZ4IO_freeCResources(ress); - free(dstFileName); - - return missed_files; -} - - -/* ********************************************************************* */ -/* ********************** LZ4 file-stream Decompression **************** */ -/* ********************************************************************* */ - -/* It's presumed that s points to a memory space of size >= 4 */ -static unsigned LZ4IO_readLE32 (const void* s) -{ - const unsigned char* const srcPtr = (const unsigned char*)s; - unsigned value32 = srcPtr[0]; - value32 += (unsigned)srcPtr[1] << 8; - value32 += (unsigned)srcPtr[2] << 16; - value32 += (unsigned)srcPtr[3] << 24; - return value32; -} - - -static unsigned -LZ4IO_fwriteSparse(FILE* file, - const void* buffer, size_t bufferSize, - int sparseFileSupport, - unsigned storedSkips) -{ - const size_t sizeT = sizeof(size_t); - const size_t maskT = sizeT -1 ; - const size_t* const bufferT = (const size_t*)buffer; /* Buffer is supposed malloc'ed, hence aligned on size_t */ - const size_t* ptrT = bufferT; - size_t bufferSizeT = bufferSize / sizeT; - const size_t* const bufferTEnd = bufferT + bufferSizeT; - const size_t segmentSizeT = (32 KB) / sizeT; - int const sparseMode = (sparseFileSupport - (file==stdout)) > 0; - - if (!sparseMode) { /* normal write */ - size_t const sizeCheck = fwrite(buffer, 1, bufferSize, file); - if (sizeCheck != bufferSize) END_PROCESS(70, "Write error : cannot write decoded block"); - return 0; - } - - /* avoid int overflow */ - if (storedSkips > 1 GB) { - int const seekResult = UTIL_fseek(file, 1 GB, SEEK_CUR); - if (seekResult != 0) END_PROCESS(71, "1 GB skip error (sparse 
file support)"); - storedSkips -= 1 GB; - } - - while (ptrT < bufferTEnd) { - size_t seg0SizeT = segmentSizeT; - size_t nb0T; - - /* count leading zeros */ - if (seg0SizeT > bufferSizeT) seg0SizeT = bufferSizeT; - bufferSizeT -= seg0SizeT; - for (nb0T=0; (nb0T < seg0SizeT) && (ptrT[nb0T] == 0); nb0T++) ; - storedSkips += (unsigned)(nb0T * sizeT); - - if (nb0T != seg0SizeT) { /* not all 0s */ - errno = 0; - { int const seekResult = UTIL_fseek(file, storedSkips, SEEK_CUR); - if (seekResult) END_PROCESS(72, "Sparse skip error(%d): %s ; try --no-sparse", (int)errno, strerror(errno)); - } - storedSkips = 0; - seg0SizeT -= nb0T; - ptrT += nb0T; - { size_t const sizeCheck = fwrite(ptrT, sizeT, seg0SizeT, file); - if (sizeCheck != seg0SizeT) END_PROCESS(73, "Write error : cannot write decoded block"); - } } - ptrT += seg0SizeT; - } - - if (bufferSize & maskT) { /* size not multiple of sizeT : implies end of block */ - const char* const restStart = (const char*)bufferTEnd; - const char* restPtr = restStart; - size_t const restSize = bufferSize & maskT; - const char* const restEnd = restStart + restSize; - for (; (restPtr < restEnd) && (*restPtr == 0); restPtr++) ; - storedSkips += (unsigned) (restPtr - restStart); - if (restPtr != restEnd) { - int const seekResult = UTIL_fseek(file, storedSkips, SEEK_CUR); - if (seekResult) END_PROCESS(74, "Sparse skip error ; try --no-sparse"); - storedSkips = 0; - { size_t const sizeCheck = fwrite(restPtr, 1, (size_t)(restEnd - restPtr), file); - if (sizeCheck != (size_t)(restEnd - restPtr)) END_PROCESS(75, "Write error : cannot write decoded end of block"); - } } - } - - return storedSkips; -} - -static void LZ4IO_fwriteSparseEnd(FILE* file, unsigned storedSkips) -{ - if (storedSkips>0) { /* implies sparseFileSupport>0 */ - const char lastZeroByte[1] = { 0 }; - if (UTIL_fseek(file, storedSkips-1, SEEK_CUR) != 0) - END_PROCESS(68, "Final skip error (sparse file)\n"); - if (fwrite(lastZeroByte, 1, 1, file) != 1) - END_PROCESS(69, "Write error : cannot write last zero\n"); - } -} - - -static unsigned g_magicRead = 0; /* out-parameter of LZ4IO_decodeLegacyStream() */ - -static unsigned long long -LZ4IO_decodeLegacyStream(FILE* finput, FILE* foutput, const LZ4IO_prefs_t* prefs) -{ - unsigned long long streamSize = 0; - unsigned storedSkips = 0; - - /* Allocate Memory */ - char* const in_buff = (char*)malloc((size_t)LZ4_compressBound(LEGACY_BLOCKSIZE)); - char* const out_buff = (char*)malloc(LEGACY_BLOCKSIZE); - if (!in_buff || !out_buff) END_PROCESS(51, "Allocation error : not enough memory"); - - /* Main Loop */ - while (1) { - unsigned int blockSize; - - /* Block Size */ - { size_t const sizeCheck = fread(in_buff, 1, LZ4IO_LEGACY_BLOCK_HEADER_SIZE, finput); - if (sizeCheck == 0) break; /* Nothing to read : file read is completed */ - if (sizeCheck != LZ4IO_LEGACY_BLOCK_HEADER_SIZE) END_PROCESS(52, "Read error : cannot access block size "); - } - blockSize = LZ4IO_readLE32(in_buff); /* Convert to Little Endian */ - if (blockSize > LZ4_COMPRESSBOUND(LEGACY_BLOCKSIZE)) { - /* Cannot read next block : maybe new stream ? */ - g_magicRead = blockSize; - break; - } - - /* Read Block */ - { size_t const sizeCheck = fread(in_buff, 1, blockSize, finput); - if (sizeCheck != blockSize) END_PROCESS(53, "Read error : cannot access compressed block !"); } - - /* Decode Block */ - { int const decodeSize = LZ4_decompress_safe(in_buff, out_buff, (int)blockSize, LEGACY_BLOCKSIZE); - if (decodeSize < 0) END_PROCESS(54, "Decoding Failed ! 
Corrupted input detected !"); - streamSize += (unsigned long long)decodeSize; - /* Write Block */ - storedSkips = LZ4IO_fwriteSparse(foutput, out_buff, (size_t)decodeSize, prefs->sparseFileSupport, storedSkips); /* success or die */ - } } - if (ferror(finput)) END_PROCESS(55, "Read error : ferror"); - - LZ4IO_fwriteSparseEnd(foutput, storedSkips); - - /* Free */ - free(in_buff); - free(out_buff); - - return streamSize; -} - - - -typedef struct { - void* srcBuffer; - size_t srcBufferSize; - void* dstBuffer; - size_t dstBufferSize; - FILE* dstFile; - LZ4F_decompressionContext_t dCtx; - void* dictBuffer; - size_t dictBufferSize; -} dRess_t; - -static void LZ4IO_loadDDict(dRess_t* ress, const LZ4IO_prefs_t* const prefs) -{ - if (!prefs->useDictionary) { - ress->dictBuffer = NULL; - ress->dictBufferSize = 0; - return; - } - - ress->dictBuffer = LZ4IO_createDict(&ress->dictBufferSize, prefs->dictionaryFilename); - if (!ress->dictBuffer) END_PROCESS(25, "Dictionary error : could not create dictionary"); -} - -static const size_t LZ4IO_dBufferSize = 64 KB; -static dRess_t LZ4IO_createDResources(const LZ4IO_prefs_t* const prefs) -{ - dRess_t ress; - - /* init */ - LZ4F_errorCode_t const errorCode = LZ4F_createDecompressionContext(&ress.dCtx, LZ4F_VERSION); - if (LZ4F_isError(errorCode)) END_PROCESS(60, "Can't create LZ4F context : %s", LZ4F_getErrorName(errorCode)); - - /* Allocate Memory */ - ress.srcBufferSize = LZ4IO_dBufferSize; - ress.srcBuffer = malloc(ress.srcBufferSize); - ress.dstBufferSize = LZ4IO_dBufferSize; - ress.dstBuffer = malloc(ress.dstBufferSize); - if (!ress.srcBuffer || !ress.dstBuffer) END_PROCESS(61, "Allocation error : not enough memory"); - - LZ4IO_loadDDict(&ress, prefs); - - ress.dstFile = NULL; - return ress; -} - -static void LZ4IO_freeDResources(dRess_t ress) -{ - LZ4F_errorCode_t errorCode = LZ4F_freeDecompressionContext(ress.dCtx); - if (LZ4F_isError(errorCode)) END_PROCESS(69, "Error : can't free LZ4F context resource : %s", LZ4F_getErrorName(errorCode)); - free(ress.srcBuffer); - free(ress.dstBuffer); - free(ress.dictBuffer); -} - - -static unsigned long long -LZ4IO_decompressLZ4F(dRess_t ress, - FILE* const srcFile, FILE* const dstFile, - const LZ4IO_prefs_t* const prefs) -{ - unsigned long long filesize = 0; - LZ4F_errorCode_t nextToLoad; - unsigned storedSkips = 0; - LZ4F_decompressOptions_t const dOpt_skipCrc = { 0, 1, 0, 0 }; - const LZ4F_decompressOptions_t* const dOptPtr = - ((prefs->blockChecksum==0) && (prefs->streamChecksum==0)) ? 
- &dOpt_skipCrc : NULL; - - /* Init feed with magic number (already consumed from FILE* sFile) */ - { size_t inSize = MAGICNUMBER_SIZE; - size_t outSize= 0; - LZ4IO_writeLE32(ress.srcBuffer, LZ4IO_MAGICNUMBER); - nextToLoad = LZ4F_decompress_usingDict(ress.dCtx, - ress.dstBuffer, &outSize, - ress.srcBuffer, &inSize, - ress.dictBuffer, ress.dictBufferSize, - dOptPtr); /* set it once, it's enough */ - if (LZ4F_isError(nextToLoad)) - END_PROCESS(62, "Header error : %s", LZ4F_getErrorName(nextToLoad)); - } - - /* Main Loop */ - for (;nextToLoad;) { - size_t readSize; - size_t pos = 0; - size_t decodedBytes = ress.dstBufferSize; - - /* Read input */ - if (nextToLoad > ress.srcBufferSize) nextToLoad = ress.srcBufferSize; - readSize = fread(ress.srcBuffer, 1, nextToLoad, srcFile); - if (!readSize) break; /* reached end of file or stream */ - - while ((pos < readSize) || (decodedBytes == ress.dstBufferSize)) { /* still to read, or still to flush */ - /* Decode Input (at least partially) */ - size_t remaining = readSize - pos; - decodedBytes = ress.dstBufferSize; - nextToLoad = LZ4F_decompress_usingDict(ress.dCtx, - ress.dstBuffer, &decodedBytes, - (char*)(ress.srcBuffer)+pos, &remaining, - ress.dictBuffer, ress.dictBufferSize, - NULL); - if (LZ4F_isError(nextToLoad)) - END_PROCESS(66, "Decompression error : %s", LZ4F_getErrorName(nextToLoad)); - pos += remaining; - - /* Write Block */ - if (decodedBytes) { - if (!prefs->testMode) - storedSkips = LZ4IO_fwriteSparse(dstFile, ress.dstBuffer, decodedBytes, prefs->sparseFileSupport, storedSkips); - filesize += decodedBytes; - DISPLAYUPDATE(2, "\rDecompressed : %u MiB ", (unsigned)(filesize>>20)); - } - - if (!nextToLoad) break; - } - } - /* can be out because readSize == 0, which could be an fread() error */ - if (ferror(srcFile)) END_PROCESS(67, "Read error"); - - if (!prefs->testMode) LZ4IO_fwriteSparseEnd(dstFile, storedSkips); - if (nextToLoad!=0) END_PROCESS(68, "Unfinished stream"); - - return filesize; -} - - -/* LZ4IO_passThrough: - * just output the same content as input, no decoding. - * This is a capability of zcat, and by extension lz4cat - * MNstore : contain the first MAGICNUMBER_SIZE bytes already read from finput - */ -#define PTSIZE (64 KB) -#define PTSIZET (PTSIZE / sizeof(size_t)) -static unsigned long long -LZ4IO_passThrough(FILE* finput, FILE* foutput, - unsigned char MNstore[MAGICNUMBER_SIZE], - int sparseFileSupport) -{ - size_t buffer[PTSIZET]; - size_t readBytes = 1; - unsigned long long total = MAGICNUMBER_SIZE; - unsigned storedSkips = 0; - - if (fwrite(MNstore, 1, MAGICNUMBER_SIZE, foutput) != MAGICNUMBER_SIZE) { - END_PROCESS(50, "Pass-through write error"); - } - while (readBytes) { - readBytes = fread(buffer, 1, sizeof(buffer), finput); - total += readBytes; - storedSkips = LZ4IO_fwriteSparse(foutput, buffer, readBytes, sparseFileSupport, storedSkips); - } - if (ferror(finput)) END_PROCESS(51, "Read Error"); - - LZ4IO_fwriteSparseEnd(foutput, storedSkips); - return total; -} - -/* when fseek() doesn't work (pipe scenario), - * read and forget from input. -**/ -#define SKIP_BUFF_SIZE (16 KB) -#define MIN(a,b) ( ((a)<(b)) ? 
(a) : (b) ) -static int skipStream(FILE* f, unsigned offset) -{ - char buf[SKIP_BUFF_SIZE]; - while (offset > 0) { - size_t const tr = MIN(offset, sizeof(buf)); - size_t const r = fread(buf, 1, tr, f); - if (r != tr) return 1; /* error reading f */ - offset -= (unsigned)tr; - } - assert(offset == 0); - return 0; -} - -/** Safely handle cases when (unsigned)offset > LONG_MAX */ -static int fseek_u32(FILE *fp, unsigned offset, int where) -{ - const unsigned stepMax = 1U << 30; - int errorNb = 0; - - if (where != SEEK_CUR) return -1; /* Only allows SEEK_CUR */ - while (offset > 0) { - unsigned s = offset; - if (s > stepMax) s = stepMax; - errorNb = UTIL_fseek(fp, (long)s, SEEK_CUR); - if (errorNb==0) { offset -= s; continue; } - errorNb = skipStream(fp, offset); - offset = 0; - } - return errorNb; -} - - -#define ENDOFSTREAM ((unsigned long long)-1) -#define DECODING_ERROR ((unsigned long long)-2) -static unsigned long long -selectDecoder(dRess_t ress, - FILE* finput, FILE* foutput, - const LZ4IO_prefs_t* const prefs) -{ - unsigned char MNstore[MAGICNUMBER_SIZE]; - unsigned magicNumber; - static unsigned nbFrames = 0; - - /* init */ - nbFrames++; - - /* Check Archive Header */ - if (g_magicRead) { /* magic number already read from finput (see legacy frame)*/ - magicNumber = g_magicRead; - g_magicRead = 0; - } else { - size_t const nbReadBytes = fread(MNstore, 1, MAGICNUMBER_SIZE, finput); - if (nbReadBytes==0) { nbFrames = 0; return ENDOFSTREAM; } /* EOF */ - if (nbReadBytes != MAGICNUMBER_SIZE) - END_PROCESS(40, "Unrecognized header : Magic Number unreadable"); - magicNumber = LZ4IO_readLE32(MNstore); /* Little Endian format */ - } - if (LZ4IO_isSkippableMagicNumber(magicNumber)) - magicNumber = LZ4IO_SKIPPABLE0; /* fold skippable magic numbers */ - - switch(magicNumber) - { - case LZ4IO_MAGICNUMBER: - return LZ4IO_decompressLZ4F(ress, finput, foutput, prefs); - case LEGACY_MAGICNUMBER: - DISPLAYLEVEL(4, "Detected : Legacy format \n"); - return LZ4IO_decodeLegacyStream(finput, foutput, prefs); - case LZ4IO_SKIPPABLE0: - DISPLAYLEVEL(4, "Skipping detected skippable area \n"); - { size_t const nbReadBytes = fread(MNstore, 1, 4, finput); - if (nbReadBytes != 4) - END_PROCESS(42, "Stream error : skippable size unreadable"); - } - { unsigned const size = LZ4IO_readLE32(MNstore); - int const errorNb = fseek_u32(finput, size, SEEK_CUR); - if (errorNb != 0) - END_PROCESS(43, "Stream error : cannot skip skippable area"); - } - return 0; - default: - if (nbFrames == 1) { /* just started */ - /* Wrong magic number at the beginning of 1st stream */ - if (!prefs->testMode && prefs->overwrite && prefs->passThrough) { - nbFrames = 0; - return LZ4IO_passThrough(finput, foutput, MNstore, prefs->sparseFileSupport); - } - END_PROCESS(44,"Unrecognized header : file cannot be decoded"); - } - { long int const position = ftell(finput); /* only works for files < 2 GB */ - DISPLAYLEVEL(2, "Stream followed by undecodable data "); - if (position != -1L) - DISPLAYLEVEL(2, "at position %i ", (int)position); - DISPLAYLEVEL(2, "\n"); - } - return DECODING_ERROR; - } -} - - -static int -LZ4IO_decompressSrcFile(dRess_t ress, - const char* input_filename, const char* output_filename, - const LZ4IO_prefs_t* const prefs) -{ - FILE* const foutput = ress.dstFile; - unsigned long long filesize = 0; - int result = 0; - - /* Init */ - FILE* const finput = LZ4IO_openSrcFile(input_filename); - if (finput==NULL) return 1; - assert(foutput != NULL); - - /* Loop over multiple streams */ - for ( ; ; ) { /* endless loop, see break 
condition */ - unsigned long long const decodedSize = - selectDecoder(ress, finput, foutput, prefs); - if (decodedSize == ENDOFSTREAM) break; - if (decodedSize == DECODING_ERROR) { result=1; break; } - filesize += decodedSize; - } - - /* Close input */ - fclose(finput); - if (prefs->removeSrcFile) { /* --rm */ - if (remove(input_filename)) - END_PROCESS(45, "Remove error : %s: %s", input_filename, strerror(errno)); - } - - /* Final Status */ - DISPLAYLEVEL(2, "\r%79s\r", ""); - DISPLAYLEVEL(2, "%-20.20s : decoded %llu bytes \n", input_filename, filesize); - (void)output_filename; - - return result; -} - - -static int -LZ4IO_decompressDstFile(dRess_t ress, - const char* input_filename, const char* output_filename, - const LZ4IO_prefs_t* const prefs) -{ - int result; - stat_t statbuf; - int stat_result = 0; - FILE* const foutput = LZ4IO_openDstFile(output_filename, prefs); - if (foutput==NULL) return 1; /* failure */ - - if ( !LZ4IO_isStdin(input_filename) - && UTIL_getFileStat(input_filename, &statbuf)) - stat_result = 1; - - ress.dstFile = foutput; - result = LZ4IO_decompressSrcFile(ress, input_filename, output_filename, prefs); - - fclose(foutput); - - /* Copy owner, file permissions and modification time */ - if ( stat_result != 0 - && !LZ4IO_isStdout(output_filename) - && !LZ4IO_isDevNull(output_filename)) { - UTIL_setFileStat(output_filename, &statbuf); - /* should return value be read ? or is silent fail good enough ? */ - } - - return result; -} - - -/* Note : LZ4IO_decompressFilename() - * can provide total decompression time for the specified fileName. - * This information is not available with LZ4IO_decompressMultipleFilenames(). - */ -int LZ4IO_decompressFilename(const char* input_filename, const char* output_filename, const LZ4IO_prefs_t* prefs) -{ - dRess_t const ress = LZ4IO_createDResources(prefs); - clock_t const start = clock(); - - int const status = LZ4IO_decompressDstFile(ress, input_filename, output_filename, prefs); - - clock_t const end = clock(); - double const seconds = (double)(end - start) / CLOCKS_PER_SEC; - DISPLAYLEVEL(4, "Done in %.2f sec \n", seconds); - - LZ4IO_freeDResources(ress); - return status; -} - - -int LZ4IO_decompressMultipleFilenames( - const char** inFileNamesTable, int ifntSize, - const char* suffix, - const LZ4IO_prefs_t* prefs) -{ - int i; - int skippedFiles = 0; - int missingFiles = 0; - char* outFileName = (char*)malloc(FNSPACE); - size_t ofnSize = FNSPACE; - size_t const suffixSize = strlen(suffix); - dRess_t ress = LZ4IO_createDResources(prefs); - - if (outFileName==NULL) END_PROCESS(70, "Memory allocation error"); - if (prefs->blockChecksum==0 && prefs->streamChecksum==0) { - DISPLAYLEVEL(4, "disabling checksum validation during decoding \n"); - } - ress.dstFile = LZ4IO_openDstFile(stdoutmark, prefs); - - for (i=0; i LZ4IO_LEGACY_BLOCK_SIZE_MAX) { - DISPLAYLEVEL(4, "Error : block in legacy frame is too large \n"); - return legacyFrameUndecodable; - } - totalBlocksSize += LZ4IO_LEGACY_BLOCK_HEADER_SIZE + nextCBlockSize; - /* skip to the next block - * note : this won't fail if nextCBlockSize is too large, skipping past the end of finput */ - if (UTIL_fseek(finput, nextCBlockSize, SEEK_CUR) != 0) { - return legacyFrameUndecodable; - } } } - return totalBlocksSize; -} - -/* LZ4IO_blockTypeID: - * return human-readable block type, following command line convention - * buffer : must be a valid memory area of at least 4 bytes */ -const char* LZ4IO_blockTypeID(LZ4F_blockSizeID_t sizeID, LZ4F_blockMode_t blockMode, char buffer[4]) -{ - buffer[0] 
= 'B'; - assert(sizeID >= 4); assert(sizeID <= 7); - buffer[1] = (char)(sizeID + '0'); - buffer[2] = (blockMode == LZ4F_blockIndependent) ? 'I' : 'D'; - buffer[3] = 0; - return buffer; -} - -/* buffer : must be valid memory area of at least 10 bytes */ -static const char* LZ4IO_toHuman(long double size, char *buf) -{ - const char units[] = {"\0KMGTPEZY"}; - size_t i = 0; - for (; size >= 1024; i++) size /= 1024; - sprintf(buf, "%.2Lf%c", size, units[i]); - return buf; -} - -/* Get filename without path prefix */ -static const char* LZ4IO_baseName(const char* input_filename) -{ - const char* b = strrchr(input_filename, '/'); - if (!b) b = strrchr(input_filename, '\\'); - if (!b) return input_filename; - return b + 1; -} - -/* Report frame/s information (--list) in verbose mode (-v). - * Will populate file info with fileName and frameSummary where applicable. - * - TODO : - * + report nb of blocks, hence max. possible decompressed size (when not reported in header) - */ -static LZ4IO_infoResult -LZ4IO_getCompressedFileInfo(LZ4IO_cFileInfo_t* cfinfo, const char* input_filename) -{ - LZ4IO_infoResult result = LZ4IO_format_not_known; /* default result (error) */ - unsigned char buffer[LZ4F_HEADER_SIZE_MAX]; - FILE* const finput = LZ4IO_openSrcFile(input_filename); - - if (finput == NULL) return LZ4IO_not_a_file; - cfinfo->fileSize = UTIL_getOpenFileSize(finput); - - while (!feof(finput)) { - LZ4IO_frameInfo_t frameInfo = LZ4IO_INIT_FRAMEINFO; - unsigned magicNumber; - /* Get MagicNumber */ - { size_t const nbReadBytes = fread(buffer, 1, MAGICNUMBER_SIZE, finput); - if (nbReadBytes == 0) { break; } /* EOF */ - result = LZ4IO_format_not_known; /* default result (error) */ - if (nbReadBytes != MAGICNUMBER_SIZE) { - END_PROCESS(40, "Unrecognized header : Magic Number unreadable"); - } } - magicNumber = LZ4IO_readLE32(buffer); /* Little Endian format */ - if (LZ4IO_isSkippableMagicNumber(magicNumber)) - magicNumber = LZ4IO_SKIPPABLE0; /* fold skippable magic numbers */ - - switch (magicNumber) { - case LZ4IO_MAGICNUMBER: - if (cfinfo->frameSummary.frameType != lz4Frame) cfinfo->eqFrameTypes = 0; - /* Get frame info */ - { const size_t readBytes = fread(buffer + MAGICNUMBER_SIZE, 1, LZ4F_HEADER_SIZE_MIN - MAGICNUMBER_SIZE, finput); - if (!readBytes || ferror(finput)) END_PROCESS(71, "Error reading %s", input_filename); - } - { size_t hSize = LZ4F_headerSize(&buffer, LZ4F_HEADER_SIZE_MIN); - if (LZ4F_isError(hSize)) break; - if (hSize > (LZ4F_HEADER_SIZE_MIN + MAGICNUMBER_SIZE)) { - /* We've already read LZ4F_HEADER_SIZE_MIN so read any extra until hSize*/ - const size_t readBytes = fread(buffer + LZ4F_HEADER_SIZE_MIN, 1, hSize - LZ4F_HEADER_SIZE_MIN, finput); - if (!readBytes || ferror(finput)) END_PROCESS(72, "Error reading %s", input_filename); - } - /* Create decompression context */ - { LZ4F_dctx* dctx; - if ( LZ4F_isError(LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION)) ) break; - { unsigned const frameInfoError = LZ4F_isError(LZ4F_getFrameInfo(dctx, &frameInfo.lz4FrameInfo, buffer, &hSize)); - LZ4F_freeDecompressionContext(dctx); - if (frameInfoError) break; - if ((cfinfo->frameSummary.lz4FrameInfo.blockSizeID != frameInfo.lz4FrameInfo.blockSizeID || - cfinfo->frameSummary.lz4FrameInfo.blockMode != frameInfo.lz4FrameInfo.blockMode) - && cfinfo->frameCount != 0) - cfinfo->eqBlockTypes = 0; - { const unsigned long long totalBlocksSize = LZ4IO_skipBlocksData(finput, - frameInfo.lz4FrameInfo.blockChecksumFlag, - frameInfo.lz4FrameInfo.contentChecksumFlag); - if (totalBlocksSize) { - char 
bTypeBuffer[5]; - LZ4IO_blockTypeID(frameInfo.lz4FrameInfo.blockSizeID, frameInfo.lz4FrameInfo.blockMode, bTypeBuffer); - DISPLAYLEVEL(3, " %6llu %14s %5s %8s", - cfinfo->frameCount + 1, - LZ4IO_frameTypeNames[frameInfo.frameType], - bTypeBuffer, - frameInfo.lz4FrameInfo.contentChecksumFlag ? "XXH32" : "-"); - if (frameInfo.lz4FrameInfo.contentSize) { - { double const ratio = (double)(totalBlocksSize + hSize) / frameInfo.lz4FrameInfo.contentSize * 100; - DISPLAYLEVEL(3, " %20llu %20llu %9.2f%%\n", - totalBlocksSize + hSize, - frameInfo.lz4FrameInfo.contentSize, - ratio); - } - /* Now we've consumed frameInfo we can use it to store the total contentSize */ - frameInfo.lz4FrameInfo.contentSize += cfinfo->frameSummary.lz4FrameInfo.contentSize; - } - else { - DISPLAYLEVEL(3, " %20llu %20s %9s \n", totalBlocksSize + hSize, "-", "-"); - cfinfo->allContentSize = 0; - } - result = LZ4IO_LZ4F_OK; - } } } } } - break; - case LEGACY_MAGICNUMBER: - frameInfo.frameType = legacyFrame; - if (cfinfo->frameSummary.frameType != legacyFrame && cfinfo->frameCount != 0) cfinfo->eqFrameTypes = 0; - cfinfo->eqBlockTypes = 0; - cfinfo->allContentSize = 0; - { const unsigned long long totalBlocksSize = LZ4IO_skipLegacyBlocksData(finput); - if (totalBlocksSize == legacyFrameUndecodable) { - DISPLAYLEVEL(1, "Corrupted legacy frame \n"); - result = LZ4IO_format_not_known; - break; - } - if (totalBlocksSize) { - DISPLAYLEVEL(3, " %6llu %14s %5s %8s %20llu %20s %9s\n", - cfinfo->frameCount + 1, - LZ4IO_frameTypeNames[frameInfo.frameType], - "-", "-", - totalBlocksSize + 4, - "-", "-"); - result = LZ4IO_LZ4F_OK; - } } - break; - case LZ4IO_SKIPPABLE0: - frameInfo.frameType = skippableFrame; - if (cfinfo->frameSummary.frameType != skippableFrame && cfinfo->frameCount != 0) cfinfo->eqFrameTypes = 0; - cfinfo->eqBlockTypes = 0; - cfinfo->allContentSize = 0; - { size_t const nbReadBytes = fread(buffer, 1, 4, finput); - if (nbReadBytes != 4) - END_PROCESS(42, "Stream error : skippable size unreadable"); - } - { unsigned const size = LZ4IO_readLE32(buffer); - int const errorNb = fseek_u32(finput, size, SEEK_CUR); - if (errorNb != 0) - END_PROCESS(43, "Stream error : cannot skip skippable area"); - DISPLAYLEVEL(3, " %6llu %14s %5s %8s %20u %20s %9s\n", - cfinfo->frameCount + 1, - "SkippableFrame", - "-", "-", size + 8, "-", "-"); - - result = LZ4IO_LZ4F_OK; - } - break; - default: - { long int const position = ftell(finput); /* only works for files < 2 GB */ - DISPLAYLEVEL(3, "Stream followed by undecodable data "); - if (position != -1L) - DISPLAYLEVEL(3, "at position %i ", (int)position); - result = LZ4IO_format_not_known; - DISPLAYLEVEL(3, "\n"); - } - break; - } - if (result != LZ4IO_LZ4F_OK) break; - cfinfo->frameSummary = frameInfo; - cfinfo->frameCount++; - } /* while (!feof(finput)) */ - fclose(finput); - return result; -} - - -int LZ4IO_displayCompressedFilesInfo(const char** inFileNames, size_t ifnIdx) -{ - int result = 0; - size_t idx = 0; - if (g_displayLevel < 3) { - DISPLAYOUT("%10s %14s %5s %11s %13s %9s %s\n", - "Frames", "Type", "Block", "Compressed", "Uncompressed", "Ratio", "Filename"); - } - for (; idx < ifnIdx; idx++) { - /* Get file info */ - LZ4IO_cFileInfo_t cfinfo = LZ4IO_INIT_CFILEINFO; - cfinfo.fileName = LZ4IO_baseName(inFileNames[idx]); - if (LZ4IO_isStdin(inFileNames[idx]) ? 
!UTIL_isRegFD(0) : !UTIL_isRegFile(inFileNames[idx])) { - DISPLAYLEVEL(1, "lz4: %s is not a regular file \n", inFileNames[idx]); - return 1; - } - DISPLAYLEVEL(3, "%s(%llu/%llu)\n", cfinfo.fileName, (unsigned long long)idx + 1, (unsigned long long)ifnIdx); - DISPLAYLEVEL(3, " %6s %14s %5s %8s %20s %20s %9s\n", - "Frame", "Type", "Block", "Checksum", "Compressed", "Uncompressed", "Ratio") - { LZ4IO_infoResult const op_result = LZ4IO_getCompressedFileInfo(&cfinfo, inFileNames[idx]); - if (op_result != LZ4IO_LZ4F_OK) { - assert(op_result == LZ4IO_format_not_known); - DISPLAYLEVEL(1, "lz4: %s: File format not recognized \n", inFileNames[idx]); - return 1; - } } - DISPLAYLEVEL(3, "\n"); - if (g_displayLevel < 3) { - /* Display Summary */ - { char buffers[3][10]; - DISPLAYOUT("%10llu %14s %5s %11s %13s ", - cfinfo.frameCount, - cfinfo.eqFrameTypes ? LZ4IO_frameTypeNames[cfinfo.frameSummary.frameType] : "-" , - cfinfo.eqBlockTypes ? LZ4IO_blockTypeID(cfinfo.frameSummary.lz4FrameInfo.blockSizeID, - cfinfo.frameSummary.lz4FrameInfo.blockMode, buffers[0]) : "-", - LZ4IO_toHuman((long double)cfinfo.fileSize, buffers[1]), - cfinfo.allContentSize ? LZ4IO_toHuman((long double)cfinfo.frameSummary.lz4FrameInfo.contentSize, buffers[2]) : "-"); - if (cfinfo.allContentSize) { - double const ratio = (double)cfinfo.fileSize / cfinfo.frameSummary.lz4FrameInfo.contentSize * 100; - DISPLAYOUT("%9.2f%% %s \n", ratio, cfinfo.fileName); - } else { - DISPLAYOUT("%9s %s\n", - "-", - cfinfo.fileName); - } } } /* if (g_displayLevel < 3) */ - } /* for (; idx < ifnIdx; idx++) */ - - return result; -} diff --git a/librocksdb-sys/lz4/programs/lz4io.h b/librocksdb-sys/lz4/programs/lz4io.h deleted file mode 100644 index 0cfb1d2..0000000 --- a/librocksdb-sys/lz4/programs/lz4io.h +++ /dev/null @@ -1,132 +0,0 @@ -/* - LZ4io.h - LZ4 File/Stream Interface - Copyright (C) Yann Collet 2011-2020 - GPL v2 License - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ -/* - Note : this is stand-alone program. - It is not part of LZ4 compression library, it is a user code of the LZ4 library. - - The license of LZ4 library is BSD. - - The license of xxHash library is BSD. - - The license of this source file is GPLv2. 
-*/ - -#ifndef LZ4IO_H_237902873 -#define LZ4IO_H_237902873 - -/*--- Dependency ---*/ -#include /* size_t */ - - -/* ************************************************** */ -/* Special input/output values */ -/* ************************************************** */ -#define stdinmark "stdin" -#define stdoutmark "stdout" -#define NULL_OUTPUT "null" -#ifdef _WIN32 -#define nulmark "nul" -#else -#define nulmark "/dev/null" -#endif - -/* ************************************************** */ -/* ****************** Type Definitions ************** */ -/* ************************************************** */ - -typedef struct LZ4IO_prefs_s LZ4IO_prefs_t; - -LZ4IO_prefs_t* LZ4IO_defaultPreferences(void); -void LZ4IO_freePreferences(LZ4IO_prefs_t* prefs); - - -/* ************************************************** */ -/* ****************** Functions ********************* */ -/* ************************************************** */ - -/* if output_filename == stdoutmark, writes to stdout */ -int LZ4IO_compressFilename(const char* input_filename, const char* output_filename, int compressionlevel, const LZ4IO_prefs_t* prefs); -int LZ4IO_decompressFilename(const char* input_filename, const char* output_filename, const LZ4IO_prefs_t* prefs); - -/* if suffix == stdoutmark, writes to stdout */ -int LZ4IO_compressMultipleFilenames(const char** inFileNamesTable, int ifntSize, const char* suffix, int compressionlevel, const LZ4IO_prefs_t* prefs); -int LZ4IO_decompressMultipleFilenames(const char** inFileNamesTable, int ifntSize, const char* suffix, const LZ4IO_prefs_t* prefs); - - -/* ************************************************** */ -/* ****************** Parameters ******************** */ -/* ************************************************** */ - -int LZ4IO_setDictionaryFilename(LZ4IO_prefs_t* const prefs, const char* dictionaryFilename); - -/* Default setting : passThrough = 0; - return : passThrough mode (0/1) */ -int LZ4IO_setPassThrough(LZ4IO_prefs_t* const prefs, int yes); - -/* Default setting : overwrite = 1; - return : overwrite mode (0/1) */ -int LZ4IO_setOverwrite(LZ4IO_prefs_t* const prefs, int yes); - -/* Default setting : testMode = 0; - return : testMode (0/1) */ -int LZ4IO_setTestMode(LZ4IO_prefs_t* const prefs, int yes); - -/* blockSizeID : valid values : 4-5-6-7 - return : 0 if error, blockSize if OK */ -size_t LZ4IO_setBlockSizeID(LZ4IO_prefs_t* const prefs, unsigned blockSizeID); - -/* blockSize : valid values : 32 -> 4MB - return : 0 if error, actual blocksize if OK */ -size_t LZ4IO_setBlockSize(LZ4IO_prefs_t* const prefs, size_t blockSize); - -/* Default setting : independent blocks */ -typedef enum { LZ4IO_blockLinked=0, LZ4IO_blockIndependent} LZ4IO_blockMode_t; -int LZ4IO_setBlockMode(LZ4IO_prefs_t* const prefs, LZ4IO_blockMode_t blockMode); - -/* Default setting : no block checksum */ -int LZ4IO_setBlockChecksumMode(LZ4IO_prefs_t* const prefs, int xxhash); - -/* Default setting : stream checksum enabled */ -int LZ4IO_setStreamChecksumMode(LZ4IO_prefs_t* const prefs, int xxhash); - -/* Default setting : 0 (no notification) */ -int LZ4IO_setNotificationLevel(int level); - -/* Default setting : 0 (disabled) */ -int LZ4IO_setSparseFile(LZ4IO_prefs_t* const prefs, int enable); - -/* Default setting : 0 == no content size present in frame header */ -int LZ4IO_setContentSize(LZ4IO_prefs_t* const prefs, int enable); - -/* Default setting : 0 == src file preserved */ -void LZ4IO_setRemoveSrcFile(LZ4IO_prefs_t* const prefs, unsigned flag); - -/* Default setting : 0 == favor compression 
ratio - * Note : 1 only works for high compression levels (10+) */ -void LZ4IO_favorDecSpeed(LZ4IO_prefs_t* const prefs, int favor); - - -/* implement --list - * @return 0 on success, 1 on error */ -int LZ4IO_displayCompressedFilesInfo(const char** inFileNames, size_t ifnIdx); - - -#endif /* LZ4IO_H_237902873 */ diff --git a/librocksdb-sys/lz4/programs/platform.h b/librocksdb-sys/lz4/programs/platform.h deleted file mode 100644 index 43a171b..0000000 --- a/librocksdb-sys/lz4/programs/platform.h +++ /dev/null @@ -1,155 +0,0 @@ -/* - platform.h - compiler and OS detection - Copyright (C) 2016-2020, Przemyslaw Skibinski, Yann Collet - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -*/ - -#ifndef PLATFORM_H_MODULE -#define PLATFORM_H_MODULE - -#if defined (__cplusplus) -extern "C" { -#endif - - - -/* ************************************** -* Compiler Options -****************************************/ -#if defined(_MSC_VER) -# define _CRT_SECURE_NO_WARNINGS /* Disable Visual Studio warning messages for fopen, strncpy, strerror */ -# if (_MSC_VER <= 1800) /* (1800 = Visual Studio 2013) */ -# define _CRT_SECURE_NO_DEPRECATE /* VS2005 - must be declared before and */ -# define snprintf sprintf_s /* snprintf unsupported by Visual <= 2013 */ -# endif -#endif - - -/* ************************************** -* Detect 64-bit OS -* http://nadeausoftware.com/articles/2012/02/c_c_tip_how_detect_processor_type_using_compiler_predefined_macros -****************************************/ -#if defined __ia64 || defined _M_IA64 /* Intel Itanium */ \ - || defined __powerpc64__ || defined __ppc64__ || defined __PPC64__ /* POWER 64-bit */ \ - || (defined __sparc && (defined __sparcv9 || defined __sparc_v9__ || defined __arch64__)) || defined __sparc64__ /* SPARC 64-bit */ \ - || defined __x86_64__s || defined _M_X64 /* x86 64-bit */ \ - || defined __arm64__ || defined __aarch64__ || defined __ARM64_ARCH_8__ /* ARM 64-bit */ \ - || (defined __mips && (__mips == 64 || __mips == 4 || __mips == 3)) /* MIPS 64-bit */ \ - || defined _LP64 || defined __LP64__ /* NetBSD, OpenBSD */ || defined __64BIT__ /* AIX */ || defined _ADDR64 /* Cray */ \ - || (defined __SIZEOF_POINTER__ && __SIZEOF_POINTER__ == 8) /* gcc */ -# if !defined(__64BIT__) -# define __64BIT__ 1 -# endif -#endif - - -/* ********************************************************* -* Turn on Large Files support (>4GB) for 32-bit Linux/Unix -***********************************************************/ -#if !defined(__64BIT__) || defined(__MINGW32__) /* No point defining Large file for 64 bit but MinGW-w64 requires it */ -# if !defined(_FILE_OFFSET_BITS) -# define _FILE_OFFSET_BITS 64 /* turn off_t into a 64-bit type for ftello, fseeko */ -# endif -# if !defined(_LARGEFILE_SOURCE) /* obsolete macro, replaced with _FILE_OFFSET_BITS */ -# define _LARGEFILE_SOURCE 1 /* Large File Support extension (LFS) - fseeko, ftello */ 
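/* --------------------------------------------------------------------------
 * Illustrative sketch, separate from the deleted platform.h: the section
 * above enables Large File Support on 32-bit Unix builds by defining
 * _FILE_OFFSET_BITS=64 before any system header, which widens off_t and lets
 * fseeko/ftello address files past 4 GB. The standalone check below simply
 * reports the resulting off_t width; it assumes a POSIX-like platform.
 * ------------------------------------------------------------------------ */
#define _FILE_OFFSET_BITS 64   /* must appear before the first #include */
#include <stdio.h>
#include <sys/types.h>

int main(void)
{
    printf("sizeof(off_t) = %zu bytes\n", sizeof(off_t));   /* expect 8 when LFS is in effect */
    return 0;
}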
-# endif -# if defined(_AIX) || defined(__hpux) -# define _LARGE_FILES /* Large file support on 32-bits AIX and HP-UX */ -# endif -#endif - - -/* ************************************************************ -* Detect POSIX version -* PLATFORM_POSIX_VERSION = -1 for non-Unix e.g. Windows -* PLATFORM_POSIX_VERSION = 0 for Unix-like non-POSIX -* PLATFORM_POSIX_VERSION >= 1 is equal to found _POSIX_VERSION -************************************************************** */ -#if !defined(_WIN32) && (defined(__unix__) || defined(__unix) || (defined(__APPLE__) && defined(__MACH__)) /* UNIX-like OS */ \ - || defined(__midipix__) || defined(__VMS)) -# if (defined(__APPLE__) && defined(__MACH__)) || defined(__SVR4) || defined(_AIX) || defined(__hpux) /* POSIX.1-2001 (SUSv3) conformant */ \ - || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__MidnightBSD__) /* BSD distros */ \ - || defined(__HAIKU__) -# define PLATFORM_POSIX_VERSION 200112L -# else -# if defined(__linux__) || defined(__linux) -# ifndef _POSIX_C_SOURCE -# define _POSIX_C_SOURCE 200809L /* use feature test macro */ -# endif -# endif -# include /* declares _POSIX_VERSION */ -# if defined(_POSIX_VERSION) /* POSIX compliant */ -# define PLATFORM_POSIX_VERSION _POSIX_VERSION -# else -# define PLATFORM_POSIX_VERSION 0 -# endif -# endif -#endif -#if !defined(PLATFORM_POSIX_VERSION) -# define PLATFORM_POSIX_VERSION -1 -#endif - - -/*-********************************************* -* Detect if isatty() and fileno() are available -*********************************************** */ -#if (defined(__linux__) && (PLATFORM_POSIX_VERSION >= 1)) || (PLATFORM_POSIX_VERSION >= 200112L) || defined(__DJGPP__) -# include /* isatty */ -# define IS_CONSOLE(stdStream) isatty(fileno(stdStream)) -#elif defined(MSDOS) || defined(OS2) || defined(__CYGWIN__) -# include /* _isatty */ -# define IS_CONSOLE(stdStream) _isatty(_fileno(stdStream)) -#elif defined(WIN32) || defined(_WIN32) -# include /* _isatty */ -# include /* DeviceIoControl, HANDLE, FSCTL_SET_SPARSE */ -# include /* FILE */ -static __inline int IS_CONSOLE(FILE* stdStream) -{ - DWORD dummy; - return _isatty(_fileno(stdStream)) && GetConsoleMode((HANDLE)_get_osfhandle(_fileno(stdStream)), &dummy); -} -#else -# define IS_CONSOLE(stdStream) 0 -#endif - - -/****************************** -* OS-specific Includes -***************************** */ -#if defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(_WIN32) -# include /* _O_BINARY */ -# include /* _setmode, _fileno, _get_osfhandle */ -# if !defined(__DJGPP__) -# include /* DeviceIoControl, HANDLE, FSCTL_SET_SPARSE */ -# include /* FSCTL_SET_SPARSE */ -# define SET_BINARY_MODE(file) { int unused=_setmode(_fileno(file), _O_BINARY); (void)unused; } -# define SET_SPARSE_FILE_MODE(file) { DWORD dw; DeviceIoControl((HANDLE) _get_osfhandle(_fileno(file)), FSCTL_SET_SPARSE, 0, 0, 0, 0, &dw, 0); } -# else -# define SET_BINARY_MODE(file) setmode(fileno(file), O_BINARY) -# define SET_SPARSE_FILE_MODE(file) -# endif -#else -# define SET_BINARY_MODE(file) -# define SET_SPARSE_FILE_MODE(file) -#endif - - - -#if defined (__cplusplus) -} -#endif - -#endif /* PLATFORM_H_MODULE */ diff --git a/librocksdb-sys/lz4/programs/util.h b/librocksdb-sys/lz4/programs/util.h deleted file mode 100644 index 3192ddc..0000000 --- a/librocksdb-sys/lz4/programs/util.h +++ /dev/null @@ -1,697 +0,0 @@ -/* - util.h - utility functions - Copyright (C) 2016-2020, Przemyslaw Skibinski, Yann Collet - - This program is free software; 
you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -*/ - -#ifndef UTIL_H_MODULE -#define UTIL_H_MODULE - -#if defined (__cplusplus) -extern "C" { -#endif - - - -/*-**************************************** -* Dependencies -******************************************/ -#include "platform.h" /* PLATFORM_POSIX_VERSION */ -#include /* size_t, ptrdiff_t */ -#include /* malloc */ -#include /* strlen, strncpy */ -#include /* fprintf, fileno */ -#include -#include /* stat, utime */ -#include /* stat */ -#if defined(_WIN32) -# include /* utime */ -# include /* _chmod */ -#else -# include /* chown, stat */ -# if PLATFORM_POSIX_VERSION < 200809L -# include /* utime */ -# else -# include /* AT_FDCWD */ -# include /* for utimensat */ -# endif -#endif -#include /* time */ -#include /* INT_MAX */ -#include - - - -/*-************************************************************** -* Basic Types -*****************************************************************/ -#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include - typedef uint8_t BYTE; - typedef uint16_t U16; - typedef int16_t S16; - typedef uint32_t U32; - typedef int32_t S32; - typedef uint64_t U64; - typedef int64_t S64; -#else - typedef unsigned char BYTE; - typedef unsigned short U16; - typedef signed short S16; - typedef unsigned int U32; - typedef signed int S32; - typedef unsigned long long U64; - typedef signed long long S64; -#endif - - -/* ************************************************************ -* Avoid fseek()'s 2GiB barrier with MSVC, MacOS, *BSD, MinGW -***************************************************************/ -#if defined(_MSC_VER) && (_MSC_VER >= 1400) -# define UTIL_fseek _fseeki64 -#elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */ -# define UTIL_fseek fseeko -#elif defined(__MINGW32__) && defined(__MSVCRT__) && !defined(__STRICT_ANSI__) && !defined(__NO_MINGW_LFS) -# define UTIL_fseek fseeko64 -#else -# define UTIL_fseek fseek -#endif - - -/*-**************************************** -* Sleep functions: Windows - Posix - others -******************************************/ -#if defined(_WIN32) -# include -# define SET_REALTIME_PRIORITY SetPriorityClass(GetCurrentProcess(), REALTIME_PRIORITY_CLASS) -# define UTIL_sleep(s) Sleep(1000*s) -# define UTIL_sleepMilli(milli) Sleep(milli) -#elif PLATFORM_POSIX_VERSION >= 0 /* Unix-like operating system */ -# include -# include /* setpriority */ -# include /* clock_t, nanosleep, clock, CLOCKS_PER_SEC */ -# if defined(PRIO_PROCESS) -# define SET_REALTIME_PRIORITY setpriority(PRIO_PROCESS, 0, -20) -# else -# define SET_REALTIME_PRIORITY /* disabled */ -# endif -# define UTIL_sleep(s) sleep(s) -# if (defined(__linux__) && (PLATFORM_POSIX_VERSION >= 199309L)) || (PLATFORM_POSIX_VERSION >= 200112L) /* nanosleep requires POSIX.1-2001 */ -# define 
UTIL_sleepMilli(milli) { struct timespec t; t.tv_sec=0; t.tv_nsec=milli*1000000ULL; nanosleep(&t, NULL); } -# else -# define UTIL_sleepMilli(milli) /* disabled */ -# endif -#else -# define SET_REALTIME_PRIORITY /* disabled */ -# define UTIL_sleep(s) /* disabled */ -# define UTIL_sleepMilli(milli) /* disabled */ -#endif - - -/*-**************************************** -* stat() functions -******************************************/ -#if defined(_MSC_VER) -# define UTIL_TYPE_stat __stat64 -# define UTIL_stat _stat64 -# define UTIL_fstat _fstat64 -# define UTIL_STAT_MODE_ISREG(st_mode) ((st_mode) & S_IFREG) -#elif defined(__MINGW32__) && defined (__MSVCRT__) -# define UTIL_TYPE_stat _stati64 -# define UTIL_stat _stati64 -# define UTIL_fstat _fstati64 -# define UTIL_STAT_MODE_ISREG(st_mode) ((st_mode) & S_IFREG) -#else -# define UTIL_TYPE_stat stat -# define UTIL_stat stat -# define UTIL_fstat fstat -# define UTIL_STAT_MODE_ISREG(st_mode) (S_ISREG(st_mode)) -#endif - - -/*-**************************************** -* fileno() function -******************************************/ -#if defined(_MSC_VER) -# define UTIL_fileno _fileno -#else -# define UTIL_fileno fileno -#endif - -/* ************************************* -* Constants -***************************************/ -#define LIST_SIZE_INCREASE (8*1024) - - -/*-**************************************** -* Compiler specifics -******************************************/ -#if defined(__INTEL_COMPILER) -# pragma warning(disable : 177) /* disable: message #177: function was declared but never referenced, useful with UTIL_STATIC */ -#endif -#if defined(__GNUC__) -# define UTIL_STATIC static __attribute__((unused)) -#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define UTIL_STATIC static inline -#elif defined(_MSC_VER) -# define UTIL_STATIC static __inline -#else -# define UTIL_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ -#endif - - - -/*-**************************************** -* Allocation functions -******************************************/ -/* - * A modified version of realloc(). - * If UTIL_realloc() fails the original block is freed. -*/ -UTIL_STATIC void* UTIL_realloc(void* ptr, size_t size) -{ - void* const newptr = realloc(ptr, size); - if (newptr) return newptr; - free(ptr); - return NULL; -} - - -/*-**************************************** -* String functions -******************************************/ -/* - * A modified version of realloc(). - * If UTIL_realloc() fails the original block is freed. 
-*/ -UTIL_STATIC int UTIL_sameString(const char* a, const char* b) -{ - assert(a!=NULL && b!=NULL); /* unsupported scenario */ - if (a==NULL) return 0; - if (b==NULL) return 0; - return !strcmp(a,b); -} - - -/*-**************************************** -* Time functions -******************************************/ -#if defined(_WIN32) /* Windows */ - - typedef LARGE_INTEGER UTIL_time_t; - UTIL_STATIC UTIL_time_t UTIL_getTime(void) { UTIL_time_t x; QueryPerformanceCounter(&x); return x; } - UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) - { - static LARGE_INTEGER ticksPerSecond; - static int init = 0; - if (!init) { - if (!QueryPerformanceFrequency(&ticksPerSecond)) - fprintf(stderr, "ERROR: QueryPerformanceFrequency() failure\n"); - init = 1; - } - return 1000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart; - } - UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) - { - static LARGE_INTEGER ticksPerSecond; - static int init = 0; - if (!init) { - if (!QueryPerformanceFrequency(&ticksPerSecond)) - fprintf(stderr, "ERROR: QueryPerformanceFrequency() failure\n"); - init = 1; - } - return 1000000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart; - } - -#elif defined(__APPLE__) && defined(__MACH__) - - #include - typedef U64 UTIL_time_t; - UTIL_STATIC UTIL_time_t UTIL_getTime(void) { return mach_absolute_time(); } - UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) - { - static mach_timebase_info_data_t rate; - static int init = 0; - if (!init) { - mach_timebase_info(&rate); - init = 1; - } - return (((clockEnd - clockStart) * (U64)rate.numer) / ((U64)rate.denom)) / 1000ULL; - } - UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) - { - static mach_timebase_info_data_t rate; - static int init = 0; - if (!init) { - mach_timebase_info(&rate); - init = 1; - } - return ((clockEnd - clockStart) * (U64)rate.numer) / ((U64)rate.denom); - } - -#elif (PLATFORM_POSIX_VERSION >= 200112L) && (defined __UCLIBC__ || (defined(__GLIBC__) && ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 17) || __GLIBC__ > 2) ) ) - - #include - typedef struct timespec UTIL_time_t; - UTIL_STATIC UTIL_time_t UTIL_getTime(void) - { - UTIL_time_t now; - if (clock_gettime(CLOCK_MONOTONIC, &now)) - fprintf(stderr, "ERROR: Failed to get time\n"); /* we could also exit() */ - return now; - } - UTIL_STATIC UTIL_time_t UTIL_getSpanTime(UTIL_time_t begin, UTIL_time_t end) - { - UTIL_time_t diff; - if (end.tv_nsec < begin.tv_nsec) { - diff.tv_sec = (end.tv_sec - 1) - begin.tv_sec; - diff.tv_nsec = (end.tv_nsec + 1000000000ULL) - begin.tv_nsec; - } else { - diff.tv_sec = end.tv_sec - begin.tv_sec; - diff.tv_nsec = end.tv_nsec - begin.tv_nsec; - } - return diff; - } - UTIL_STATIC U64 UTIL_getSpanTimeMicro(UTIL_time_t begin, UTIL_time_t end) - { - UTIL_time_t const diff = UTIL_getSpanTime(begin, end); - U64 micro = 0; - micro += 1000000ULL * diff.tv_sec; - micro += diff.tv_nsec / 1000ULL; - return micro; - } - UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_time_t begin, UTIL_time_t end) - { - UTIL_time_t const diff = UTIL_getSpanTime(begin, end); - U64 nano = 0; - nano += 1000000000ULL * diff.tv_sec; - nano += diff.tv_nsec; - return nano; - } - -#else /* relies on standard C (note : clock_t measurements can be wrong when using multi-threading) */ - - typedef clock_t UTIL_time_t; - UTIL_STATIC UTIL_time_t UTIL_getTime(void) { return clock(); } - UTIL_STATIC U64 
UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; } - UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; } -#endif - - -/* returns time span in microseconds */ -UTIL_STATIC U64 UTIL_clockSpanMicro(UTIL_time_t clockStart) -{ - UTIL_time_t const clockEnd = UTIL_getTime(); - return UTIL_getSpanTimeMicro(clockStart, clockEnd); -} - -/* returns time span in nanoseconds */ -UTIL_STATIC U64 UTIL_clockSpanNano(UTIL_time_t clockStart) -{ - UTIL_time_t const clockEnd = UTIL_getTime(); - return UTIL_getSpanTimeNano(clockStart, clockEnd); -} - -UTIL_STATIC void UTIL_waitForNextTick(void) -{ - UTIL_time_t const clockStart = UTIL_getTime(); - UTIL_time_t clockEnd; - do { - clockEnd = UTIL_getTime(); - } while (UTIL_getSpanTimeNano(clockStart, clockEnd) == 0); -} - - - -/*-**************************************** -* File functions -******************************************/ -#if defined(_MSC_VER) - #define chmod _chmod - typedef struct __stat64 stat_t; -#else - typedef struct stat stat_t; -#endif - - -UTIL_STATIC int UTIL_isRegFile(const char* infilename); -UTIL_STATIC int UTIL_isRegFD(int fd); - - -UTIL_STATIC int UTIL_setFileStat(const char *filename, stat_t *statbuf) -{ - int res = 0; - - if (!UTIL_isRegFile(filename)) - return -1; - - { -#if defined(_WIN32) || (PLATFORM_POSIX_VERSION < 200809L) - struct utimbuf timebuf; - timebuf.actime = time(NULL); - timebuf.modtime = statbuf->st_mtime; - res += utime(filename, &timebuf); /* set access and modification times */ -#else - struct timespec timebuf[2]; - memset(timebuf, 0, sizeof(timebuf)); - timebuf[0].tv_nsec = UTIME_NOW; - timebuf[1].tv_sec = statbuf->st_mtime; - res += utimensat(AT_FDCWD, filename, timebuf, 0); /* set access and modification times */ -#endif - } - -#if !defined(_WIN32) - res += chown(filename, statbuf->st_uid, statbuf->st_gid); /* Copy ownership */ -#endif - - res += chmod(filename, statbuf->st_mode & 07777); /* Copy file permissions */ - - errno = 0; - return -res; /* number of errors is returned */ -} - - -UTIL_STATIC int UTIL_getFDStat(int fd, stat_t *statbuf) -{ - int r; -#if defined(_MSC_VER) - r = _fstat64(fd, statbuf); - if (r || !(statbuf->st_mode & S_IFREG)) return 0; /* No good... */ -#else - r = fstat(fd, statbuf); - if (r || !S_ISREG(statbuf->st_mode)) return 0; /* No good... */ -#endif - return 1; -} - -UTIL_STATIC int UTIL_getFileStat(const char* infilename, stat_t *statbuf) -{ - int r; -#if defined(_MSC_VER) - r = _stat64(infilename, statbuf); - if (r || !(statbuf->st_mode & S_IFREG)) return 0; /* No good... */ -#else - r = stat(infilename, statbuf); - if (r || !S_ISREG(statbuf->st_mode)) return 0; /* No good... 
*/ -#endif - return 1; -} - - -UTIL_STATIC int UTIL_isRegFD(int fd) -{ - stat_t statbuf; -#ifdef _WIN32 - /* Windows runtime library always open file descriptors 0, 1 and 2 in text mode, therefore we can't use them for binary I/O */ - if(fd < 3) return 0; -#endif - return UTIL_getFDStat(fd, &statbuf); /* Only need to know whether it is a regular file */ -} - - -UTIL_STATIC int UTIL_isRegFile(const char* infilename) -{ - stat_t statbuf; - return UTIL_getFileStat(infilename, &statbuf); /* Only need to know whether it is a regular file */ -} - - -UTIL_STATIC U32 UTIL_isDirectory(const char* infilename) -{ - int r; - stat_t statbuf; -#if defined(_MSC_VER) - r = _stat64(infilename, &statbuf); - if (!r && (statbuf.st_mode & _S_IFDIR)) return 1; -#else - r = stat(infilename, &statbuf); - if (!r && S_ISDIR(statbuf.st_mode)) return 1; -#endif - return 0; -} - - -UTIL_STATIC U64 UTIL_getOpenFileSize(FILE* file) -{ - int r; - int fd; - struct UTIL_TYPE_stat statbuf; - - fd = UTIL_fileno(file); - if (fd < 0) { - perror("fileno"); - exit(1); - } - r = UTIL_fstat(fd, &statbuf); - if (r || !UTIL_STAT_MODE_ISREG(statbuf.st_mode)) return 0; /* No good... */ - return (U64)statbuf.st_size; -} - - -UTIL_STATIC U64 UTIL_getFileSize(const char* infilename) -{ - int r; - struct UTIL_TYPE_stat statbuf; - - r = UTIL_stat(infilename, &statbuf); - if (r || !UTIL_STAT_MODE_ISREG(statbuf.st_mode)) return 0; /* No good... */ - return (U64)statbuf.st_size; -} - - -UTIL_STATIC U64 UTIL_getTotalFileSize(const char** fileNamesTable, unsigned nbFiles) -{ - U64 total = 0; - unsigned n; - for (n=0; n= *bufEnd) { - ptrdiff_t newListSize = (*bufEnd - *bufStart) + LIST_SIZE_INCREASE; - *bufStart = (char*)UTIL_realloc(*bufStart, newListSize); - *bufEnd = *bufStart + newListSize; - if (*bufStart == NULL) { free(path); FindClose(hFile); return 0; } - } - if (*bufStart + *pos + pathLength < *bufEnd) { - strncpy(*bufStart + *pos, path, *bufEnd - (*bufStart + *pos)); - *pos += pathLength + 1; - nbFiles++; - } - } - free(path); - } while (FindNextFileA(hFile, &cFile)); - - FindClose(hFile); - assert(nbFiles < INT_MAX); - return (int)nbFiles; -} - -#elif defined(__linux__) || (PLATFORM_POSIX_VERSION >= 200112L) /* opendir, readdir require POSIX.1-2001 */ -# define UTIL_HAS_CREATEFILELIST -# include /* opendir, readdir */ -# include /* strerror, memcpy */ - -UTIL_STATIC int UTIL_prepareFileList(const char* dirName, char** bufStart, size_t* pos, char** bufEnd) -{ - DIR* dir; - struct dirent * entry; - size_t dirLength; - int nbFiles = 0; - - if (!(dir = opendir(dirName))) { - fprintf(stderr, "Cannot open directory '%s': %s\n", dirName, strerror(errno)); - return 0; - } - - dirLength = strlen(dirName); - errno = 0; - while ((entry = readdir(dir)) != NULL) { - char* path; - size_t fnameLength, pathLength; - if (strcmp (entry->d_name, "..") == 0 || - strcmp (entry->d_name, ".") == 0) continue; - fnameLength = strlen(entry->d_name); - path = (char*)malloc(dirLength + fnameLength + 2); - if (!path) { closedir(dir); return 0; } - memcpy(path, dirName, dirLength); - path[dirLength] = '/'; - memcpy(path+dirLength+1, entry->d_name, fnameLength); - pathLength = dirLength+1+fnameLength; - path[pathLength] = 0; - - if (UTIL_isDirectory(path)) { - nbFiles += UTIL_prepareFileList(path, bufStart, pos, bufEnd); /* Recursively call "UTIL_prepareFileList" with the new path. 
*/ - if (*bufStart == NULL) { free(path); closedir(dir); return 0; } - } else { - if (*bufStart + *pos + pathLength >= *bufEnd) { - size_t const newListSize = (size_t)(*bufEnd - *bufStart) + LIST_SIZE_INCREASE; - *bufStart = (char*)UTIL_realloc(*bufStart, newListSize); - *bufEnd = *bufStart + newListSize; - if (*bufStart == NULL) { free(path); closedir(dir); return 0; } - } - if (*bufStart + *pos + pathLength < *bufEnd) { - strncpy(*bufStart + *pos, path, *bufEnd - (*bufStart + *pos)); - *pos += pathLength + 1; - nbFiles++; - } - } - free(path); - errno = 0; /* clear errno after UTIL_isDirectory, UTIL_prepareFileList */ - } - - if (errno != 0) { - fprintf(stderr, "readdir(%s) error: %s\n", dirName, strerror(errno)); - free(*bufStart); - *bufStart = NULL; - } - closedir(dir); - return nbFiles; -} - -#else - -UTIL_STATIC int UTIL_prepareFileList(const char* dirName, char** bufStart, size_t* pos, char** bufEnd) -{ - (void)bufStart; (void)bufEnd; (void)pos; - fprintf(stderr, "Directory %s ignored (compiled without _WIN32 or _POSIX_C_SOURCE)\n", dirName); - return 0; -} - -#endif /* #ifdef _WIN32 */ - -/* - * UTIL_createFileList - takes a list of files and directories (params: inputNames, inputNamesNb), scans directories, - * and returns a new list of files (params: return value, allocatedBuffer, allocatedNamesNb). - * After finishing usage of the list the structures should be freed with UTIL_freeFileList(params: return value, allocatedBuffer) - * In case of error UTIL_createFileList returns NULL and UTIL_freeFileList should not be called. - */ -UTIL_STATIC const char** -UTIL_createFileList(const char** inputNames, unsigned inputNamesNb, - char** allocatedBuffer, unsigned* allocatedNamesNb) -{ - size_t pos; - unsigned i, nbFiles; - char* buf = (char*)malloc(LIST_SIZE_INCREASE); - size_t bufSize = LIST_SIZE_INCREASE; - const char** fileTable; - - if (!buf) return NULL; - - for (i=0, pos=0, nbFiles=0; i= bufSize) { - while (pos + len >= bufSize) bufSize += LIST_SIZE_INCREASE; - buf = (char*)UTIL_realloc(buf, bufSize); - if (!buf) return NULL; - } - assert(pos + len < bufSize); - memcpy(buf + pos, inputNames[i], len); - pos += len; - nbFiles++; - } else { - char* bufend = buf + bufSize; - nbFiles += (unsigned)UTIL_prepareFileList(inputNames[i], &buf, &pos, &bufend); - if (buf == NULL) return NULL; - assert(bufend > buf); - bufSize = (size_t)(bufend - buf); - } } - - if (nbFiles == 0) { free(buf); return NULL; } - - fileTable = (const char**)malloc(((size_t)nbFiles+1) * sizeof(const char*)); - if (!fileTable) { free(buf); return NULL; } - - for (i=0, pos=0; i bufSize) { - free(buf); - free((void*)fileTable); - return NULL; - } /* can this happen ? 
*/ - - *allocatedBuffer = buf; - *allocatedNamesNb = nbFiles; - - return fileTable; -} - - -UTIL_STATIC void -UTIL_freeFileList(const char** filenameTable, char* allocatedBuffer) -{ - free(allocatedBuffer); - free((void*)filenameTable); -} - - -#if defined (__cplusplus) -} -#endif - -#endif /* UTIL_H_MODULE */ diff --git a/librocksdb-sys/lz4/tests/.gitignore b/librocksdb-sys/lz4/tests/.gitignore deleted file mode 100644 index c7d8f19..0000000 --- a/librocksdb-sys/lz4/tests/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ - -# build artefacts -datagen -frametest -frametest32 -fullbench -fullbench32 -fuzzer -fuzzer32 -fasttest -roundTripTest -checkTag -checkFrame -decompress-partial -decompress-partial-usingDict -abiTest -freestanding - -# test artefacts -tmp* -versionsTest -abiTests -lz4_all.c - -# local tests -afl diff --git a/librocksdb-sys/lz4/tests/COPYING b/librocksdb-sys/lz4/tests/COPYING deleted file mode 100644 index d159169..0000000 --- a/librocksdb-sys/lz4/tests/COPYING +++ /dev/null @@ -1,339 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. 
We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. 
If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. 
-However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. 
Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. diff --git a/librocksdb-sys/lz4/tests/Makefile b/librocksdb-sys/lz4/tests/Makefile deleted file mode 100644 index 93a5581..0000000 --- a/librocksdb-sys/lz4/tests/Makefile +++ /dev/null @@ -1,618 +0,0 @@ -# ########################################################################## -# LZ4 programs - Makefile -# Copyright (C) Yann Collet 2011-2020 -# -# GPL v2 License -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# You can contact the author at : -# - LZ4 homepage : http://www.lz4.org -# - LZ4 source repository : https://github.com/lz4/lz4 -# ########################################################################## -# fuzzer : Test tool, to check lz4 integrity on target platform -# frametest : Test tool, to check lz4frame integrity on target platform -# fullbench : Precisely measure speed for each LZ4 function variant -# datagen : generates synthetic data samples for tests & benchmarks -# ########################################################################## - -LZ4DIR := ../lib -PRGDIR := ../programs -TESTDIR := versionsTest -PYTHON ?= python3 - -DEBUGLEVEL?= 1 -DEBUGFLAGS = -g -DLZ4_DEBUG=$(DEBUGLEVEL) -CFLAGS ?= -O3 # can select custom optimization flags. 
Example : CFLAGS=-O2 make -CFLAGS += -Wall -Wextra -Wundef -Wcast-qual -Wcast-align -Wshadow \ - -Wswitch-enum -Wdeclaration-after-statement -Wstrict-prototypes \ - -Wpointer-arith -Wstrict-aliasing=1 -CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS) -CPPFLAGS+= -I$(LZ4DIR) -I$(PRGDIR) -DXXH_NAMESPACE=LZ4_ -FLAGS = $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) - -include ../Makefile.inc - -LZ4 := $(PRGDIR)/lz4$(EXT) - - -# Default test parameters -TEST_FILES := COPYING -FUZZER_TIME := -T90s -NB_LOOPS ?= -i1 - -.PHONY: default -default: all - -all: fullbench fuzzer frametest roundTripTest datagen checkFrame decompress-partial - -all32: CFLAGS+=-m32 -all32: all - -lz4: - $(MAKE) -C $(PRGDIR) $@ CFLAGS="$(CFLAGS)" - -lib liblz4.pc: - $(MAKE) -C $(LZ4DIR) $@ CFLAGS="$(CFLAGS)" - -lz4c unlz4 lz4cat: lz4 - $(LN_SF) $(LZ4) $(PRGDIR)/$@ - -lz4c32: # create a 32-bits version for 32/64 interop tests - $(MAKE) -C $(PRGDIR) $@ CFLAGS="-m32 $(CFLAGS)" - -%.o : $(LZ4DIR)/%.c $(LZ4DIR)/%.h - $(CC) -c $(CFLAGS) $(CPPFLAGS) $< -o $@ - -fullbench : DEBUGLEVEL=0 -fullbench : lz4.o lz4hc.o lz4frame.o xxhash.o fullbench.c - $(CC) $(FLAGS) $^ -o $@$(EXT) - -$(LZ4DIR)/liblz4.a: - $(MAKE) -C $(LZ4DIR) liblz4.a - -fullbench-lib: fullbench.c $(LZ4DIR)/liblz4.a - $(CC) $(FLAGS) $^ -o $@$(EXT) - -fullbench-dll: fullbench.c $(LZ4DIR)/xxhash.c - $(MAKE) -C $(LZ4DIR) liblz4 - $(CC) $(FLAGS) $^ -o $@$(EXT) -DLZ4_DLL_IMPORT=1 $(LZ4DIR)/dll/$(LIBLZ4).dll - -# test LZ4_USER_MEMORY_FUNCTIONS -fullbench-wmalloc: CPPFLAGS += -DLZ4_USER_MEMORY_FUNCTIONS -fullbench-wmalloc: fullbench - -fuzzer : lz4.o lz4hc.o xxhash.o fuzzer.c - $(CC) $(FLAGS) $^ -o $@$(EXT) - -frametest: lz4frame.o lz4.o lz4hc.o xxhash.o frametest.c - $(CC) $(FLAGS) $^ -o $@$(EXT) - -roundTripTest : lz4.o lz4hc.o xxhash.o roundTripTest.c - $(CC) $(FLAGS) $^ -o $@$(EXT) - -datagen : $(PRGDIR)/datagen.c datagencli.c - $(CC) $(FLAGS) -I$(PRGDIR) $^ -o $@$(EXT) - -checkFrame : lz4frame.o lz4.o lz4hc.o xxhash.o checkFrame.c - $(CC) $(FLAGS) $^ -o $@$(EXT) - -decompress-partial: lz4.o decompress-partial.c - $(CC) $(FLAGS) $^ -o $@$(EXT) - -decompress-partial-usingDict: lz4.o decompress-partial-usingDict.c - $(CC) $(FLAGS) $^ -o $@$(EXT) - -freestanding: freestanding.c - $(CC) -ffreestanding -nostdlib $^ -o $@$(EXT) - -.PHONY: clean -clean: - @$(MAKE) -C $(LZ4DIR) $@ > $(VOID) - @$(MAKE) -C $(PRGDIR) $@ > $(VOID) - @$(RM) -rf core *.o *.test tmp* \ - fullbench-dll$(EXT) fullbench-lib$(EXT) \ - fullbench$(EXT) fullbench32$(EXT) \ - fuzzer$(EXT) fuzzer32$(EXT) \ - frametest$(EXT) frametest32$(EXT) \ - fasttest$(EXT) roundTripTest$(EXT) \ - datagen$(EXT) checkTag$(EXT) \ - frameTest$(EXT) decompress-partial$(EXT) \ - abiTest$(EXT) freestanding$(EXT) \ - lz4_all.c - @$(RM) -rf $(TESTDIR) - @echo Cleaning completed - -.PHONY: versionsTest -versionsTest: - $(PYTHON) test-lz4-versions.py - -.PHONY: listTest -listTest: lz4 - QEMU_SYS=$(QEMU_SYS) $(PYTHON) test-lz4-list.py - -abiTest: LDLIBS += -llz4 - -.PHONY: abiTests -abiTests: - $(PYTHON) test-lz4-abi.py - -checkTag: checkTag.c $(LZ4DIR)/lz4.h - $(CC) $(FLAGS) $< -o $@$(EXT) - -#----------------------------------------------------------------------------- -# validated only for Linux, OSX, BSD, Hurd and Solaris targets -#----------------------------------------------------------------------------- -ifeq ($(POSIX_ENV),Yes) - -MD5:=md5sum -ifneq (,$(filter $(shell $(UNAME)), Darwin )) -MD5:=md5 -r -endif - -# note : we should probably settle on a single compare utility -CMP:=cmp -GREP:=grep -DIFF:=diff -ifneq (,$(filter $(shell $(UNAME)),SunOS)) 
-DIFF:=gdiff -endif - -CAT:=cat -DD:=dd -DATAGEN:=./datagen - -.PHONY: list -list: - @$(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | xargs - -.PHONY: check -check: test-lz4-essentials - -.PHONY: test -test: test-lz4 test-lz4c test-frametest test-fullbench test-fuzzer test-amalgamation listTest test-decompress-partial - -.PHONY: test32 -test32: CFLAGS+=-m32 -test32: test - -.PHONY: test-amalgamation -test-amalgamation: lz4_all.o - -lz4_all.c: $(LZ4DIR)/lz4.c $(LZ4DIR)/lz4hc.c $(LZ4DIR)/lz4frame.c - $(CAT) $^ > $@ - -.PHONY: test-install -test-install: lz4 lib liblz4.pc - lz4_root=.. ./test_install.sh - -.PHONY: test-compile-with-lz4-memory-usage -test-compile-with-lz4-memory-usage: - $(MAKE) clean; CFLAGS=-O0 CPPFLAGS=-D'LZ4_MEMORY_USAGE=LZ4_MEMORY_USAGE_MIN' $(MAKE) all - $(MAKE) clean; CFLAGS=-O0 CPPFLAGS=-D'LZ4_MEMORY_USAGE=LZ4_MEMORY_USAGE_MAX' $(MAKE) all - -.PHONY: test-lz4-sparse -# Rules regarding Temporary test files : -# Each test must use its own unique set of names during execution. -# Each temporary test file must begin by an FPREFIX. -# Each FPREFIX must be unique for each test. -# All FPREFIX must start with `tmp`, for `make clean` -# All tests must clean their temporary test files on successful completion, -# and only their test files : do not employ sweeping statements such `rm tmp*` or `rm *.lz4` -test-lz4-sparse: FPREFIX = tmp-tls -test-lz4-sparse: lz4 datagen - @echo "\n ---- test sparse file support ----" - $(DATAGEN) -g5M -P100 > $(FPREFIX)dg5M - $(LZ4) -B4D $(FPREFIX)dg5M -c | $(LZ4) -dv --sparse > $(FPREFIX)cB4 - $(DIFF) -s $(FPREFIX)dg5M $(FPREFIX)cB4 - $(LZ4) -B5D $(FPREFIX)dg5M -c | $(LZ4) -dv --sparse > $(FPREFIX)cB5 - $(DIFF) -s $(FPREFIX)dg5M $(FPREFIX)cB5 - $(LZ4) -B6D $(FPREFIX)dg5M -c | $(LZ4) -dv --sparse > $(FPREFIX)cB6 - $(DIFF) -s $(FPREFIX)dg5M $(FPREFIX)cB6 - $(LZ4) -B7D $(FPREFIX)dg5M -c | $(LZ4) -dv --sparse > $(FPREFIX)cB7 - $(DIFF) -s $(FPREFIX)dg5M $(FPREFIX)cB7 - $(LZ4) $(FPREFIX)dg5M -c | $(LZ4) -dv --no-sparse > $(FPREFIX)nosparse - $(DIFF) -s $(FPREFIX)dg5M $(FPREFIX)nosparse - ls -ls $(FPREFIX)* - $(DATAGEN) -s1 -g1200007 -P100 | $(LZ4) | $(LZ4) -dv --sparse > $(FPREFIX)odd # Odd size file (to generate non-full last block) - $(DATAGEN) -s1 -g1200007 -P100 | $(DIFF) -s - $(FPREFIX)odd - ls -ls $(FPREFIX)odd - @$(RM) $(FPREFIX)* - @echo "\n Compatibility with Console :" - echo "Hello World 1 !" | $(LZ4) | $(LZ4) -d -c - echo "Hello World 2 !" | $(LZ4) | $(LZ4) -d | $(CAT) - echo "Hello World 3 !" | $(LZ4) --no-frame-crc | $(LZ4) -d -c - @echo "\n Compatibility with Append :" - $(DATAGEN) -P100 -g1M > $(FPREFIX)dg1M - $(CAT) $(FPREFIX)dg1M $(FPREFIX)dg1M > $(FPREFIX)2M - $(LZ4) -B5 -v $(FPREFIX)dg1M $(FPREFIX)c - $(LZ4) -d -v $(FPREFIX)c $(FPREFIX)r - $(LZ4) -d -v $(FPREFIX)c -c >> $(FPREFIX)r - ls -ls $(FPREFIX)* - $(DIFF) $(FPREFIX)2M $(FPREFIX)r - @$(RM) $(FPREFIX)* - -test-lz4-contentSize: FPREFIX = tmp-lzc -test-lz4-contentSize: lz4 datagen - @echo "\n ---- test original size support ----" - $(DATAGEN) -g15M > $(FPREFIX) - $(LZ4) -v $(FPREFIX) -c | $(LZ4) -t - $(LZ4) -v --content-size $(FPREFIX) -c | $(LZ4) -d > $(FPREFIX)-dup - $(DIFF) $(FPREFIX) $(FPREFIX)-dup - $(LZ4) -f $(FPREFIX) -c > $(FPREFIX).lz4 # compressed with content size - $(LZ4) --content-size $(FPREFIX) -c > $(FPREFIX)-wcz.lz4 - ! 
$(DIFF) $(FPREFIX).lz4 $(FPREFIX)-wcz.lz4 # must differ, due to content size - $(LZ4) --content-size < $(FPREFIX) > $(FPREFIX)-wcz2.lz4 # can determine content size because stdin is just a file - $(DIFF) $(FPREFIX)-wcz.lz4 $(FPREFIX)-wcz2.lz4 # both must contain content size - $(CAT) $(FPREFIX) | $(LZ4) > $(FPREFIX)-ncz.lz4 - $(DIFF) $(FPREFIX).lz4 $(FPREFIX)-ncz.lz4 # both don't have content size - $(CAT) $(FPREFIX) | $(LZ4) --content-size > $(FPREFIX)-ncz2.lz4 # can't determine content size - $(DIFF) $(FPREFIX).lz4 $(FPREFIX)-ncz2.lz4 # both don't have content size - @$(RM) $(FPREFIX)* - -test-lz4-frame-concatenation: FPREFIX = tmp-lfc -test-lz4-frame-concatenation: lz4 datagen - @echo "\n ---- test frame concatenation ----" - @echo -n > $(FPREFIX)-empty - @echo hi > $(FPREFIX)-nonempty - $(CAT) $(FPREFIX)-nonempty $(FPREFIX)-empty $(FPREFIX)-nonempty > $(FPREFIX)-src - $(LZ4) -zq $(FPREFIX)-empty -c > $(FPREFIX)-empty.lz4 - $(LZ4) -zq $(FPREFIX)-nonempty -c > $(FPREFIX)-nonempty.lz4 - $(CAT) $(FPREFIX)-nonempty.lz4 $(FPREFIX)-empty.lz4 $(FPREFIX)-nonempty.lz4 > $(FPREFIX)-concat.lz4 - $(LZ4) -d $(FPREFIX)-concat.lz4 -c > $(FPREFIX)-result - $(CMP) $(FPREFIX)-src $(FPREFIX)-result - @$(RM) $(FPREFIX)* - @echo frame concatenation test completed - -test-lz4-multiple: FPREFIX = tmp-tml -test-lz4-multiple: lz4 datagen - @echo "\n ---- test multiple files ----" - @$(DATAGEN) -s1 > $(FPREFIX)1 2> $(VOID) - @$(DATAGEN) -s2 -g100K > $(FPREFIX)2 2> $(VOID) - @$(DATAGEN) -s3 -g200K > $(FPREFIX)3 2> $(VOID) - # compress multiple files : one .lz4 per source file - $(LZ4) -f -m $(FPREFIX)* - test -f $(FPREFIX)1.lz4 - test -f $(FPREFIX)2.lz4 - test -f $(FPREFIX)3.lz4 - # decompress multiple files : one output file per .lz4 - mv $(FPREFIX)1 $(FPREFIX)1-orig - mv $(FPREFIX)2 $(FPREFIX)2-orig - mv $(FPREFIX)3 $(FPREFIX)3-orig - $(LZ4) -d -f -m $(FPREFIX)*.lz4 - $(CMP) $(FPREFIX)1 $(FPREFIX)1-orig # must be identical - $(CMP) $(FPREFIX)2 $(FPREFIX)2-orig - $(CMP) $(FPREFIX)3 $(FPREFIX)3-orig - # compress multiple files into stdout - $(CAT) $(FPREFIX)1.lz4 $(FPREFIX)2.lz4 $(FPREFIX)3.lz4 > $(FPREFIX)-concat1 - $(RM) $(FPREFIX)*.lz4 - $(LZ4) -m $(FPREFIX)1 $(FPREFIX)2 $(FPREFIX)3 -c > $(FPREFIX)-concat2 - test ! -f $(FPREFIX)1.lz4 # must not create .lz4 artefact - $(CMP) $(FPREFIX)-concat1 $(FPREFIX)-concat2 # must be equivalent - # decompress multiple files into stdout - $(RM) $(FPREFIX)-concat1 $(FPREFIX)-concat2 - $(LZ4) -f -m $(FPREFIX)1 $(FPREFIX)2 $(FPREFIX)3 # generate .lz4 to decompress - $(CAT) $(FPREFIX)1 $(FPREFIX)2 $(FPREFIX)3 > $(FPREFIX)-concat1 # create concatenated reference - $(RM) $(FPREFIX)1 $(FPREFIX)2 $(FPREFIX)3 - $(LZ4) -d -m $(FPREFIX)1.lz4 $(FPREFIX)2.lz4 $(FPREFIX)3.lz4 -c > $(FPREFIX)-concat2 - test ! -f $(FPREFIX)1 # must not create file artefact - $(CMP) $(FPREFIX)-concat1 $(FPREFIX)-concat2 # must be equivalent - # compress multiple files, one of which is absent (must fail) - ! $(LZ4) -f -m $(FPREFIX)-concat1 notHere $(FPREFIX)-concat2 # must fail : notHere not present - # test lz4-compressed file - $(LZ4) -tm $(FPREFIX)-concat1.lz4 - $(LZ4) -tm $(FPREFIX)-concat1.lz4 $(FPREFIX)-concat2.lz4 - # test multiple lz4 files, one of which is absent (must fail) - ! 
$(LZ4) -tm $(FPREFIX)-concat1.lz4 notHere.lz4 $(FPREFIX)-concat2.lz4 - @$(RM) $(FPREFIX)* - -test-lz4-multiple-legacy: FPREFIX = tmp-lml -test-lz4-multiple-legacy: lz4 datagen - @echo "\n ---- test multiple files (Legacy format) ----" - @$(DATAGEN) -s1 > $(FPREFIX)1 2> $(VOID) - @$(DATAGEN) -s2 -g100K > $(FPREFIX)2 2> $(VOID) - @$(DATAGEN) -s3 -g200K > $(FPREFIX)3 2> $(VOID) - # compress multiple files using legacy format: one .lz4 per source file - $(LZ4) -f -l -m $(FPREFIX)* - test -f $(FPREFIX)1.lz4 - test -f $(FPREFIX)2.lz4 - test -f $(FPREFIX)3.lz4 - # decompress multiple files compressed using legacy format: one output file per .lz4 - mv $(FPREFIX)1 $(FPREFIX)1-orig - mv $(FPREFIX)2 $(FPREFIX)2-orig - mv $(FPREFIX)3 $(FPREFIX)3-orig - $(LZ4) -d -f -m $(FPREFIX)*.lz4 - $(LZ4) -l -d -f -m $(FPREFIX)*.lz4 # -l mustn't impact -d option - $(CMP) $(FPREFIX)1 $(FPREFIX)1-orig # must be identical - $(CMP) $(FPREFIX)2 $(FPREFIX)2-orig - $(CMP) $(FPREFIX)3 $(FPREFIX)3-orig - # compress multiple files into stdout using legacy format - $(CAT) $(FPREFIX)1.lz4 $(FPREFIX)2.lz4 $(FPREFIX)3.lz4 > $(FPREFIX)-concat1 - $(RM) $(FPREFIX)*.lz4 - $(LZ4) -l -m $(FPREFIX)1 $(FPREFIX)2 $(FPREFIX)3 -c > $(FPREFIX)-concat2 - test ! -f $(FPREFIX)1.lz4 # must not create .lz4 artefact - $(CMP) $(FPREFIX)-concat1 $(FPREFIX)-concat2 # must be equivalent - # # # decompress multiple files into stdout using legacy format - $(RM) $(FPREFIX)-concat1 $(FPREFIX)-concat2 - $(LZ4) -l -f -m $(FPREFIX)1 $(FPREFIX)2 $(FPREFIX)3 # generate .lz4 to decompress - $(CAT) $(FPREFIX)1 $(FPREFIX)2 $(FPREFIX)3 > $(FPREFIX)-concat1 # create concatenated reference - $(RM) $(FPREFIX)1 $(FPREFIX)2 $(FPREFIX)3 - $(LZ4) -d -m $(FPREFIX)1.lz4 $(FPREFIX)2.lz4 $(FPREFIX)3.lz4 -c > $(FPREFIX)-concat2 - $(LZ4) -d -l -m $(FPREFIX)1.lz4 $(FPREFIX)2.lz4 $(FPREFIX)3.lz4 -c > $(FPREFIX)-concat2 # -l mustn't impact option -d - test ! -f $(FPREFIX)1 # must not create file artefact - $(CMP) $(FPREFIX)-concat1 $(FPREFIX)-concat2 # must be equivalent - # # # compress multiple files, one of which is absent (must fail) - ! $(LZ4) -f -l -m $(FPREFIX)-concat1 notHere-legacy $(FPREFIX)-concat2 # must fail : notHere-legacy not present - @$(RM) $(FPREFIX)* - -SKIPFILE = goldenSamples/skip.bin -test-lz4-skippable: FPREFIX = tmp-lsk -test-lz4-skippable: lz4 datagen - @echo "\n ---- test lz4 with skippable frames ----" - $(LZ4) -dc $(SKIPFILE) - $(LZ4) -dc < $(SKIPFILE) - cat $(SKIPFILE) | $(LZ4) -dc - echo "Hello from Valid Frame!\n" | $(LZ4) -c > $(FPREFIX).lz4 - cat $(SKIPFILE) $(FPREFIX).lz4 $(SKIPFILE) | $(LZ4) -dc - $(RM) $(FPREFIX)* - -test-lz4-basic: FPREFIX = tmp-tlb -test-lz4-basic: lz4 datagen unlz4 lz4cat - @echo "\n ---- test lz4 basic compression/decompression ----" - $(DATAGEN) -g0 | $(LZ4) -v | $(LZ4) -t - $(DATAGEN) -g16KB | $(LZ4) -9 | $(LZ4) -t - $(DATAGEN) -g20KB > $(FPREFIX)-dg20k - $(LZ4) < $(FPREFIX)-dg20k | $(LZ4) -d > $(FPREFIX)-dec - $(DIFF) -q $(FPREFIX)-dg20k $(FPREFIX)-dec - $(LZ4) --no-frame-crc < $(FPREFIX)-dg20k | $(LZ4) -d > $(FPREFIX)-dec - $(DIFF) -q $(FPREFIX)-dg20k $(FPREFIX)-dec - $(DATAGEN) | $(LZ4) -BI | $(LZ4) -t - $(DATAGEN) | $(LZ4) --no-crc | $(LZ4) -t - $(DATAGEN) -g6M -P99 | $(LZ4) -9BD | $(LZ4) -t - $(DATAGEN) -g17M | $(LZ4) -9v | $(LZ4) -qt - $(DATAGEN) -g33M | $(LZ4) --no-frame-crc | $(LZ4) -t - $(DATAGEN) -g256MB | $(LZ4) -vqB4D | $(LZ4) -t --no-crc - @echo "hello world" > $(FPREFIX)-hw - $(LZ4) --rm -f $(FPREFIX)-hw $(FPREFIX)-hw.lz4 - test ! 
-f $(FPREFIX)-hw # must fail (--rm) - test -f $(FPREFIX)-hw.lz4 - $(PRGDIR)/lz4cat $(FPREFIX)-hw.lz4 | $(GREP) "hello world" - $(PRGDIR)/unlz4 --rm $(FPREFIX)-hw.lz4 $(FPREFIX)-hw - test -f $(FPREFIX)-hw - test ! -f $(FPREFIX)-hw.lz4 # must fail (--rm) - test ! -f $(FPREFIX)-hw.lz4.lz4 # must fail (unlz4) - $(PRGDIR)/lz4cat $(FPREFIX)-hw # pass-through mode - test -f $(FPREFIX)-hw - test ! -f $(FPREFIX)-hw.lz4 # must fail (lz4cat) - $(LZ4) $(FPREFIX)-hw $(FPREFIX)-hw.lz4 # creates $(FPREFIX)-hw.lz4 - $(PRGDIR)/lz4cat < $(FPREFIX)-hw.lz4 > $(FPREFIX)3 # checks lz4cat works with stdin (#285) - $(DIFF) -q $(FPREFIX)-hw $(FPREFIX)3 - $(PRGDIR)/lz4cat < $(FPREFIX)-hw > $(FPREFIX)2 # checks lz4cat works in pass-through mode - $(DIFF) -q $(FPREFIX)-hw $(FPREFIX)2 - cp $(FPREFIX)-hw ./-d - $(LZ4) --rm -- -d -d.lz4 # compresses ./d into ./-d.lz4 - test -f ./-d.lz4 - test ! -f ./-d - mv ./-d.lz4 ./-z - $(LZ4) -d --rm -- -z $(FPREFIX)4 # uncompresses ./-z into $(FPREFIX)4 - test ! -f ./-z - $(DIFF) -q $(FPREFIX)-hw $(FPREFIX)4 - ! $(LZ4) $(FPREFIX)2 $(FPREFIX)3 $(FPREFIX)4 # must fail: refuse to handle 3+ file names - $(LZ4) -f $(FPREFIX)-hw # create $(FPREFIX)-hw.lz4, for next tests - $(LZ4) --list $(FPREFIX)-hw.lz4 # test --list on valid single-frame file - $(LZ4) --list < $(FPREFIX)-hw.lz4 # test --list from stdin (file only) - $(CAT) $(FPREFIX)-hw >> $(FPREFIX)-hw.lz4 - ! $(LZ4) -f $(FPREFIX)-hw.lz4 # uncompress valid frame followed by invalid data (must fail now) - $(LZ4) -BX $(FPREFIX)-hw -c -q | $(LZ4) -tv # test block checksum - # $(DATAGEN) -g20KB generates the same file every single time - # cannot save output of $(DATAGEN) -g20KB as input file to lz4 because the following shell commands are run before $(DATAGEN) -g20KB - test "$(shell $(DATAGEN) -g20KB | $(LZ4) -c --fast | wc -c)" -lt "$(shell $(DATAGEN) -g20KB | $(LZ4) -c --fast=9 | wc -c)" # -1 vs -9 - test "$(shell $(DATAGEN) -g20KB | $(LZ4) -c -1 | wc -c)" -lt "$(shell $(DATAGEN) -g20KB| $(LZ4) -c --fast=1 | wc -c)" # 1 vs -1 - test "$(shell $(DATAGEN) -g20KB | $(LZ4) -c --fast=1 | wc -c)" -eq "$(shell $(DATAGEN) -g20KB| $(LZ4) -c --fast| wc -c)" # checks default fast compression is -1 - ! $(LZ4) -c --fast=0 $(FPREFIX)-dg20K # lz4 should fail when fast=0 - ! 
$(LZ4) -c --fast=-1 $(FPREFIX)-dg20K # lz4 should fail when fast=-1 - # High --fast values can result in out-of-bound dereferences #876 - $(DATAGEN) -g1M | $(LZ4) -c --fast=999999999 > /dev/null - # Test for #596 - @echo "TEST" > $(FPREFIX)-test - $(LZ4) -m $(FPREFIX)-test - $(LZ4) $(FPREFIX)-test.lz4 $(FPREFIX)-test2 - $(DIFF) -q $(FPREFIX)-test $(FPREFIX)-test2 - @$(RM) $(FPREFIX)* - - -test-lz4-dict: FPREFIX = tmp-dict -test-lz4-dict: lz4 datagen - @echo "\n ---- test lz4 compression/decompression with dictionary ----" - $(DATAGEN) -g16KB > $(FPREFIX) - $(DATAGEN) -g32KB > $(FPREFIX)-sample-32k - < $(FPREFIX)-sample-32k $(LZ4) -D $(FPREFIX) | $(LZ4) -dD $(FPREFIX) | diff - $(FPREFIX)-sample-32k - $(DATAGEN) -g128MB > $(FPREFIX)-sample-128m - < $(FPREFIX)-sample-128m $(LZ4) -D $(FPREFIX) | $(LZ4) -dD $(FPREFIX) | diff - $(FPREFIX)-sample-128m - touch $(FPREFIX)-sample-0 - < $(FPREFIX)-sample-0 $(LZ4) -D $(FPREFIX) | $(LZ4) -dD $(FPREFIX) | diff - $(FPREFIX)-sample-0 - - < $(FPREFIX)-sample-32k $(LZ4) -D $(FPREFIX)-sample-0 | $(LZ4) -dD $(FPREFIX)-sample-0 | diff - $(FPREFIX)-sample-32k - < $(FPREFIX)-sample-0 $(LZ4) -D $(FPREFIX)-sample-0 | $(LZ4) -dD $(FPREFIX)-sample-0 | diff - $(FPREFIX)-sample-0 - - @echo "\n ---- test lz4 dictionary loading ----" - $(DATAGEN) -g128KB > $(FPREFIX)-data-128KB - set -e; \ - for l in 0 1 4 128 32767 32768 32769 65535 65536 65537 98303 98304 98305 131071 131072 131073; do \ - $(DATAGEN) -g$$l > $(FPREFIX)-$$l; \ - $(DD) if=$(FPREFIX)-$$l of=$(FPREFIX)-$$l-tail bs=1 count=65536 skip=$$((l > 65536 ? l - 65536 : 0)); \ - < $(FPREFIX)-$$l $(LZ4) -D stdin $(FPREFIX)-data-128KB -c | $(LZ4) -dD $(FPREFIX)-$$l-tail | $(DIFF) - $(FPREFIX)-data-128KB; \ - < $(FPREFIX)-$$l-tail $(LZ4) -D stdin $(FPREFIX)-data-128KB -c | $(LZ4) -dD $(FPREFIX)-$$l | $(DIFF) - $(FPREFIX)-data-128KB; \ - done - @$(RM) $(FPREFIX)* - -test-lz4hc-hugefile: lz4 datagen - @echo "\n ---- test HC compression/decompression of huge files ----" - $(DATAGEN) -g4200MB | $(LZ4) -v3BD | $(LZ4) -qt - -test-lz4-fast-hugefile: FPREFIX = tmp-lfh -test-lz4-fast-hugefile: lz4 datagen - @echo "\n ---- test huge files compression/decompression ----" - $(DATAGEN) -g6GB | $(LZ4) -vB5D | $(LZ4) -qt - # test large file size [2-4] GB - @$(DATAGEN) -g3G -P100 | $(LZ4) -vv | $(LZ4) --decompress --force --sparse - $(FPREFIX)1 - @ls -ls $(FPREFIX)1 - @$(DATAGEN) -g3G -P100 | $(LZ4) --quiet --content-size | $(LZ4) --verbose --decompress --force --sparse - $(FPREFIX)2 - @ls -ls $(FPREFIX)2 - $(DIFF) -s $(FPREFIX)1 $(FPREFIX)2 - @$(RM) $(FPREFIX)* - -test-lz4-hugefile: test-lz4-fast-hugefile test-lz4hc-hugefile - -test-lz4-testmode: FPREFIX = tmp-ltm -test-lz4-testmode: lz4 datagen - @echo "\n ---- bench mode ----" - $(LZ4) -bi0 - $(DATAGEN) > $(FPREFIX) - $(LZ4) -f $(FPREFIX) -c > $(FPREFIX).lz4 - $(LZ4) -bdi0 $(FPREFIX).lz4 # test benchmark decode-only mode - $(LZ4) -bdi0 --no-crc $(FPREFIX).lz4 # test benchmark decode-only mode - @echo "\n ---- test mode ----" - ! $(DATAGEN) | $(LZ4) -t - ! $(DATAGEN) | $(LZ4) -tf - @echo "\n ---- pass-through mode ----" - @echo "Why hello there " > $(FPREFIX)2.lz4 - ! $(LZ4) -f $(FPREFIX)2.lz4 > $(VOID) - ! $(DATAGEN) | $(LZ4) -dc > $(VOID) - ! $(DATAGEN) | $(LZ4) -df > $(VOID) - $(DATAGEN) | $(LZ4) -dcf > $(VOID) - @echo "Hello World !" > $(FPREFIX)1 - $(LZ4) -dcf $(FPREFIX)1 - @echo "from underground..." > $(FPREFIX)2 - $(LZ4) -dcfm $(FPREFIX)1 $(FPREFIX)2 - @echo "\n ---- non-existing source (must fail cleanly) ----" - ! $(LZ4) file-does-not-exist - ! 
$(LZ4) -f file-does-not-exist - ! $(LZ4) -t file-does-not-exist - ! $(LZ4) -fm file1-dne file2-dne - @$(RM) $(FPREFIX)* - -test-lz4-opt-parser: lz4 datagen - @echo "\n ---- test opt-parser ----" - $(DATAGEN) -g16KB | $(LZ4) -12 | $(LZ4) -t - $(DATAGEN) -P10 | $(LZ4) -12B4 | $(LZ4) -t - $(DATAGEN) -g256K | $(LZ4) -12B4D | $(LZ4) -t - $(DATAGEN) -g512K -P25 | $(LZ4) -12BD | $(LZ4) -t - $(DATAGEN) -g1M | $(LZ4) -12B5 | $(LZ4) -t - $(DATAGEN) -g1M -s2 | $(LZ4) -12B4D | $(LZ4) -t - $(DATAGEN) -g2M -P99 | $(LZ4) -11B4D | $(LZ4) -t - $(DATAGEN) -g4M | $(LZ4) -11vq | $(LZ4) -qt - $(DATAGEN) -g8M | $(LZ4) -11B4 | $(LZ4) -t - $(DATAGEN) -g16M -P90 | $(LZ4) -11B5 | $(LZ4) -t - $(DATAGEN) -g32M -P10 | $(LZ4) -11B5D | $(LZ4) -t - -test-lz4-essentials : lz4 datagen test-lz4-basic test-lz4-multiple test-lz4-multiple-legacy \ - test-lz4-frame-concatenation test-lz4-testmode \ - test-lz4-contentSize test-lz4-dict - -test-lz4: lz4 datagen test-lz4-essentials test-lz4-opt-parser \ - test-lz4-sparse test-lz4-hugefile test-lz4-dict \ - test-lz4-skippable - -test-lz4c: LZ4C = $(LZ4)c -test-lz4c: lz4c datagen - @echo "\n ---- test lz4c variant ----" - $(DATAGEN) -g256MB | $(LZ4C) -l -v | $(LZ4C) -t - -test-lz4c32: CFLAGS+=-m32 -test-lz4c32: test-lz4 - -test-interop-32-64: lz4 lz4c32 datagen - @echo "\n ---- test interoperability 32-bits -vs- 64 bits ----" - $(DATAGEN) -g16KB | $(LZ4)c32 -9 | $(LZ4) -t - $(DATAGEN) -P10 | $(LZ4) -9B4 | $(LZ4)c32 -t - $(DATAGEN) | $(LZ4)c32 | $(LZ4) -t - $(DATAGEN) -g1M | $(LZ4) -3B5 | $(LZ4)c32 -t - $(DATAGEN) -g256MB | $(LZ4)c32 -vqB4D | $(LZ4) -qt - $(DATAGEN) -g1G -P90 | $(LZ4) | $(LZ4)c32 -t - $(DATAGEN) -g6GB | $(LZ4)c32 -vq9BD | $(LZ4) -qt - -test-lz4c32-basic: lz4c32 datagen - @echo "\n ---- test lz4c32 32-bits version ----" - $(DATAGEN) -g16KB | $(LZ4)c32 -9 | $(LZ4)c32 -t - $(DATAGEN) | $(LZ4)c32 | $(LZ4)c32 -t - $(DATAGEN) -g256MB | $(LZ4)c32 -vqB4D | $(LZ4)c32 -qt - $(DATAGEN) -g6GB | $(LZ4)c32 -vqB5D | $(LZ4)c32 -qt - -test-platform: - @echo "\n ---- test lz4 $(QEMU_SYS) platform ----" - $(QEMU_SYS) $(DATAGEN) -g16KB | $(QEMU_SYS) $(LZ4) -9 | $(QEMU_SYS) $(LZ4) -t - $(QEMU_SYS) $(DATAGEN) | $(QEMU_SYS) $(LZ4) | $(QEMU_SYS) $(LZ4) -t - $(QEMU_SYS) $(DATAGEN) -g256MB | $(QEMU_SYS) $(LZ4) -vqB4D | $(QEMU_SYS) $(LZ4) -qt -ifneq ($(QEMU_SYS),qemu-arm-static) - $(QEMU_SYS) $(DATAGEN) -g3GB | $(QEMU_SYS) $(LZ4) -vqB5D | $(QEMU_SYS) $(LZ4) -qt -endif - -test-fullbench: fullbench - ./fullbench --no-prompt $(NB_LOOPS) $(TEST_FILES) - -test-fullbench32: CFLAGS += -m32 -test-fullbench32: test-fullbench - -test-fuzzer: fuzzer - ./fuzzer $(FUZZER_TIME) - -test-fuzzer32: CFLAGS += -m32 -test-fuzzer32: test-fuzzer - -test-frametest: frametest - ./frametest -v $(FUZZER_TIME) - -test-frametest32: CFLAGS += -m32 -test-frametest32: test-frametest - -VALGRIND = valgrind --leak-check=yes --error-exitcode=1 -test-mem: FPREFIX = tmp-tvm -test-mem: lz4 datagen fuzzer frametest fullbench - @echo "\n ---- valgrind tests : memory analyzer ----" - $(VALGRIND) $(DATAGEN) -g50M > $(VOID) - $(DATAGEN) -g16KB > $(FPREFIX)dg16K - $(VALGRIND) $(LZ4) -9 -BD -f $(FPREFIX)dg16K $(VOID) - $(DATAGEN) -g16KB -s2 > $(FPREFIX)dg16K2 - $(DATAGEN) -g16KB -s3 > $(FPREFIX)dg16K3 - $(VALGRIND) $(LZ4) --force --multiple $(FPREFIX)dg16K $(FPREFIX)dg16K2 $(FPREFIX)dg16K3 - $(DATAGEN) -g7MB > $(FPREFIX)dg7M - $(VALGRIND) $(LZ4) -9 -B5D -f $(FPREFIX)dg7M $(FPREFIX)dg16K2 - $(VALGRIND) $(LZ4) -t $(FPREFIX)dg16K2 - $(VALGRIND) $(LZ4) -bi1 $(FPREFIX)dg7M - $(VALGRIND) ./fullbench -i1 $(FPREFIX)dg7M $(FPREFIX)dg16K2 - 
$(VALGRIND) $(LZ4) -B4D -f -vq $(FPREFIX)dg7M $(VOID) - $(VALGRIND) $(LZ4) --list -m $(FPREFIX)*.lz4 - $(VALGRIND) $(LZ4) --list -m -v $(FPREFIX)*.lz4 - $(RM) $(FPREFIX)* - $(VALGRIND) ./fuzzer -i64 -t1 - $(VALGRIND) ./frametest -i256 - -test-mem32: lz4c32 datagen -# unfortunately, valgrind doesn't seem to work with non-native binary... - -test-decompress-partial : decompress-partial decompress-partial-usingDict - @echo "\n ---- test decompress-partial ----" - ./decompress-partial$(EXT) - @echo "\n ---- test decompress-partial-usingDict ----" - ./decompress-partial-usingDict$(EXT) - -test-freestanding: freestanding - @echo "\n ---- test freestanding ----" - ./freestanding$(EXT) - -strace ./freestanding$(EXT) - -ltrace ./freestanding$(EXT) - -endif diff --git a/librocksdb-sys/lz4/tests/README.md b/librocksdb-sys/lz4/tests/README.md deleted file mode 100644 index 65437de..0000000 --- a/librocksdb-sys/lz4/tests/README.md +++ /dev/null @@ -1,71 +0,0 @@ -Programs and scripts for automated testing of LZ4 -======================================================= - -This directory contains the following programs and scripts: -- `datagen` : Synthetic and parametrable data generator, for tests -- `frametest` : Test tool that checks lz4frame integrity on target platform -- `fullbench` : Precisely measure speed for each lz4 inner functions -- `fuzzer` : Test tool, to check lz4 integrity on target platform -- `test-lz4-speed.py` : script for testing lz4 speed difference between commits -- `test-lz4-versions.py` : compatibility test between lz4 versions stored on Github - - -#### `test-lz4-versions.py` - script for testing lz4 interoperability between versions - -This script creates `versionsTest` directory to which lz4 repository is cloned. -Then all tagged (released) versions of lz4 are compiled. -In the following step interoperability between lz4 versions is checked. - - -#### `test-lz4-speed.py` - script for testing lz4 speed difference between commits - -This script creates `speedTest` directory to which lz4 repository is cloned. -Then it compiles all branches of lz4 and performs a speed benchmark for a given list of files (the `testFileNames` parameter). -After `sleepTime` (an optional parameter, default 300 seconds) seconds the script checks repository for new commits. -If a new commit is found it is compiled and a speed benchmark for this commit is performed. -The results of the speed benchmark are compared to the previous results. -If compression or decompression speed for one of lz4 levels is lower than `lowerLimit` (an optional parameter, default 0.98) the speed benchmark is restarted. -If second results are also lower than `lowerLimit` the warning e-mail is sent to recipients from the list (the `emails` parameter). 
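Both helper scripts, like the Makefile test targets above, ultimately verify one round-trip invariant: whatever one lz4 build compresses, another build must decompress back to byte-identical data. As a minimal, hypothetical sketch of that invariant against the public one-shot `lz4.h` API (illustrative only, not part of the removed test sources; buffer sizes and messages are arbitrary):

```c
/* Hypothetical standalone example; not taken from the deleted lz4 test sources. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "lz4.h"   /* LZ4_compressBound, LZ4_compress_default, LZ4_decompress_safe */

int main(void)
{
    const char src[]  = "hello world, hello world, hello world";
    int const srcSize = (int)sizeof(src);
    int const bound   = LZ4_compressBound(srcSize);   /* worst-case compressed size */
    char* const cBuf  = (char*)malloc((size_t)bound);
    char* const rBuf  = (char*)malloc((size_t)srcSize);
    if (cBuf == NULL || rBuf == NULL) return 1;

    /* compress, decompress, then compare with the original buffer */
    int const cSize = LZ4_compress_default(src, cBuf, srcSize, bound);
    if (cSize <= 0) { fprintf(stderr, "compression failed\n"); return 1; }

    int const rSize = LZ4_decompress_safe(cBuf, rBuf, cSize, srcSize);
    if (rSize != srcSize || memcmp(src, rBuf, (size_t)srcSize) != 0) {
        fprintf(stderr, "round-trip mismatch\n");
        return 1;
    }

    printf("round-trip OK: %d -> %d -> %d bytes\n", srcSize, cSize, rSize);
    free(cBuf); free(rBuf);
    return 0;
}
```

A cross-version interoperability check is the same loop with the compressor and the decompressor taken from two different builds or releases.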
- -Additional remarks: -- To be sure that speed results are accurate the script should be run on a "stable" target system with no other jobs running in parallel -- Using the script with virtual machines can lead to large variations of speed results -- The speed benchmark is not performed until computers' load average is lower than `maxLoadAvg` (an optional parameter, default 0.75) -- The script sends e-mails using `mutt`; if `mutt` is not available it sends e-mails without attachments using `mail`; if both are not available it only prints a warning - - -The example usage with two test files, one e-mail address, and with an additional message: -``` -./test-lz4-speed.py "silesia.tar calgary.tar" "email@gmail.com" --message "tested on my laptop" --sleepTime 60 -``` - -To run the script in background please use: -``` -nohup ./test-lz4-speed.py testFileNames emails & -``` - -The full list of parameters: -``` -positional arguments: - testFileNames file names list for speed benchmark - emails list of e-mail addresses to send warnings - -optional arguments: - -h, --help show this help message and exit - --message MESSAGE attach an additional message to e-mail - --lowerLimit LOWERLIMIT - send email if speed is lower than given limit - --maxLoadAvg MAXLOADAVG - maximum load average to start testing - --lastCLevel LASTCLEVEL - last compression level for testing - --sleepTime SLEEPTIME - frequency of repository checking in seconds -``` - - -#### License - -All files in this directory are licensed under GPL-v2. -See [COPYING](COPYING) for details. -The text of the license is also included at the top of each source file. diff --git a/librocksdb-sys/lz4/tests/abiTest.c b/librocksdb-sys/lz4/tests/abiTest.c deleted file mode 100644 index e46004a..0000000 --- a/librocksdb-sys/lz4/tests/abiTest.c +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree), - * meaning you may select, at your option, one of the above-listed licenses. - */ - -/* - * abiTest : - * ensure ABI stability expectations are not broken by a new version -**/ - - -/*=========================================== -* Dependencies -*==========================================*/ -#include <stddef.h> /* size_t */ -#include <stdlib.h> /* malloc, free, exit */ -#include <stdio.h> /* fprintf */ -#include <string.h> /* strcmp */ -#include <assert.h> -#include <sys/types.h> /* stat */ -#include <sys/stat.h> /* stat */ -#include "xxhash.h" - -#include "lz4.h" -#include "lz4frame.h" - - -/*=========================================== -* Macros -*==========================================*/ -#define MIN(a,b) ( (a) < (b) ? (a) : (b) ) - -#define MSG(...) fprintf(stderr, __VA_ARGS__) - -#define CONTROL_MSG(c, ...)
{ \ - if ((c)) { \ - MSG(__VA_ARGS__); \ - MSG(" \n"); \ - abort(); \ - } \ -} - - -static size_t checkBuffers(const void* buff1, const void* buff2, size_t buffSize) -{ - const char* const ip1 = (const char*)buff1; - const char* const ip2 = (const char*)buff2; - size_t pos; - - for (pos=0; pos= LZ4_compressBound(srcSize)` - * for compression to be guaranteed to work */ -static void roundTripTest(void* resultBuff, size_t resultBuffCapacity, - void* compressedBuff, size_t compressedBuffCapacity, - const void* srcBuff, size_t srcSize) -{ - int const acceleration = 1; - // Note : can't use LZ4_initStream(), because it's only present since v1.9.0 - memset(&LZ4_cState, 0, sizeof(LZ4_cState)); - { int const cSize = LZ4_compress_fast_continue(&LZ4_cState, (const char*)srcBuff, (char*)compressedBuff, (int)srcSize, (int)compressedBuffCapacity, acceleration); - CONTROL_MSG(cSize == 0, "Compression error !"); - { int const dInit = LZ4_setStreamDecode(&LZ4_dState, NULL, 0); - CONTROL_MSG(dInit == 0, "LZ4_setStreamDecode error !"); - } - { int const dSize = LZ4_decompress_safe_continue (&LZ4_dState, (const char*)compressedBuff, (char*)resultBuff, cSize, (int)resultBuffCapacity); - CONTROL_MSG(dSize < 0, "Decompression detected an error !"); - CONTROL_MSG(dSize != (int)srcSize, "Decompression corruption error : wrong decompressed size !"); - } } - - /* check potential content corruption error */ - assert(resultBuffCapacity >= srcSize); - { size_t const errorPos = checkBuffers(srcBuff, resultBuff, srcSize); - CONTROL_MSG(errorPos != srcSize, - "Silent decoding corruption, at pos %u !!!", - (unsigned)errorPos); - } -} - -static void roundTripCheck(const void* srcBuff, size_t srcSize) -{ - size_t const cBuffSize = LZ4_COMPRESSBOUND(srcSize); - void* const cBuff = malloc(cBuffSize); - void* const rBuff = malloc(cBuffSize); - - if (!cBuff || !rBuff) { - fprintf(stderr, "not enough memory ! \n"); - exit(1); - } - - roundTripTest(rBuff, cBuffSize, - cBuff, cBuffSize, - srcBuff, srcSize); - - free(rBuff); - free(cBuff); -} - - -static size_t getFileSize(const char* infilename) -{ - int r; -#if defined(_MSC_VER) - struct _stat64 statbuf; - r = _stat64(infilename, &statbuf); - if (r || !(statbuf.st_mode & S_IFREG)) return 0; /* No good... */ -#else - struct stat statbuf; - r = stat(infilename, &statbuf); - if (r || !S_ISREG(statbuf.st_mode)) return 0; /* No good... 
*/ -#endif - return (size_t)statbuf.st_size; -} - - -static int isDirectory(const char* infilename) -{ - int r; -#if defined(_MSC_VER) - struct _stat64 statbuf; - r = _stat64(infilename, &statbuf); - if (!r && (statbuf.st_mode & _S_IFDIR)) return 1; -#else - struct stat statbuf; - r = stat(infilename, &statbuf); - if (!r && S_ISDIR(statbuf.st_mode)) return 1; -#endif - return 0; -} - - -/** loadFile() : - * requirement : `buffer` size >= `fileSize` */ -static void loadFile(void* buffer, const char* fileName, size_t fileSize) -{ - FILE* const f = fopen(fileName, "rb"); - if (isDirectory(fileName)) { - MSG("Ignoring %s directory \n", fileName); - exit(2); - } - if (f==NULL) { - MSG("Impossible to open %s \n", fileName); - exit(3); - } - { size_t const readSize = fread(buffer, 1, fileSize, f); - if (readSize != fileSize) { - MSG("Error reading %s \n", fileName); - exit(5); - } } - fclose(f); -} - - -static void fileCheck(const char* fileName) -{ - size_t const fileSize = getFileSize(fileName); - void* const buffer = malloc(fileSize + !fileSize /* avoid 0 */); - if (!buffer) { - MSG("not enough memory \n"); - exit(4); - } - loadFile(buffer, fileName, fileSize); - roundTripCheck(buffer, fileSize); - free (buffer); -} - - -int bad_usage(const char* exeName) -{ - MSG(" \n"); - MSG("bad usage: \n"); - MSG(" \n"); - MSG("%s [Options] fileName \n", exeName); - MSG(" \n"); - MSG("Options: \n"); - MSG("-# : use #=[0-9] compression level (default:0 == random) \n"); - return 1; -} - - -int main(int argCount, const char** argv) -{ - const char* const exeName = argv[0]; - int argNb = 1; - // Note : LZ4_VERSION_STRING requires >= v1.7.3+ - MSG("abiTest, built binary based on API %s \n", LZ4_VERSION_STRING); - // Note : LZ4_versionString() requires >= v1.7.5+ - MSG("currently linked to dll %s \n", LZ4_versionString()); - - assert(argCount >= 1); - if (argCount < 2) return bad_usage(exeName); - - if (argNb >= argCount) return bad_usage(exeName); - - fileCheck(argv[argNb]); - MSG("no pb detected \n"); - return 0; -} diff --git a/librocksdb-sys/lz4/tests/checkFrame.c b/librocksdb-sys/lz4/tests/checkFrame.c deleted file mode 100644 index 946805f..0000000 --- a/librocksdb-sys/lz4/tests/checkFrame.c +++ /dev/null @@ -1,303 +0,0 @@ - /* - checkFrame - verify frame headers - Copyright (C) Yann Collet 2014-2020 - - GPL v2 License - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- - You can contact the author at : - - LZ4 homepage : http://www.lz4.org - - LZ4 source repository : https://github.com/lz4/lz4 - */ - - /*-************************************ - * Includes - **************************************/ - #include "util.h" /* U32 */ - #include <stdlib.h> /* malloc, free */ - #include <stdio.h> /* fprintf */ - #include <string.h> /* strcmp */ - #include <time.h> /* clock_t, clock(), CLOCKS_PER_SEC */ - #include - #include "lz4frame.h" /* include multiple times to test correctness/safety */ - #include "lz4frame.h" - #define LZ4F_STATIC_LINKING_ONLY - #include "lz4frame.h" - #include "lz4frame.h" - #include "lz4.h" /* LZ4_VERSION_STRING */ - #define XXH_STATIC_LINKING_ONLY - #include "xxhash.h" /* XXH64 */ - - - /*-************************************ - * Constants - **************************************/ - #define KB *(1U<<10) - #define MB *(1U<<20) - #define GB *(1U<<30) - - - /*-************************************ - * Macros - **************************************/ - #define DISPLAY(...) fprintf(stderr, __VA_ARGS__) - #define DISPLAYLEVEL(l, ...) if (displayLevel>=l) { DISPLAY(__VA_ARGS__); } - - /************************************** - * Exceptions - ***************************************/ - #ifndef DEBUG - # define DEBUG 0 - #endif - #define DEBUGOUTPUT(...) if (DEBUG) DISPLAY(__VA_ARGS__); - #define EXM_THROW(error, ...) \ -{ \ - DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \ - DISPLAYLEVEL(1, "Error %i : ", error); \ - DISPLAYLEVEL(1, __VA_ARGS__); \ - DISPLAYLEVEL(1, " \n"); \ - return(error); \ -} - - - -/*-*************************************** -* Local Parameters -*****************************************/ -static U32 no_prompt = 0; -static U32 displayLevel = 2; -static U32 use_pause = 0; - - -/*-******************************************************* -* Fuzzer functions -*********************************************************/ -#define MIN(a,b) ( (a) < (b) ? (a) : (b) ) -#define MAX(a,b) ( (a) > (b) ?
(a) : (b) ) - -typedef struct { - void* srcBuffer; - size_t srcBufferSize; - void* dstBuffer; - size_t dstBufferSize; - LZ4F_decompressionContext_t ctx; -} cRess_t; - -static int createCResources(cRess_t* ress) -{ - ress->srcBufferSize = 4 MB; - ress->srcBuffer = malloc(ress->srcBufferSize); - ress->dstBufferSize = 4 MB; - ress->dstBuffer = malloc(ress->dstBufferSize); - - if (!ress->srcBuffer || !ress->dstBuffer) { - free(ress->srcBuffer); - free(ress->dstBuffer); - EXM_THROW(20, "Allocation error : not enough memory"); - } - - if (LZ4F_isError( LZ4F_createDecompressionContext(&(ress->ctx), LZ4F_VERSION) )) { - free(ress->srcBuffer); - free(ress->dstBuffer); - EXM_THROW(21, "Unable to create decompression context"); - } - return 0; -} - -static void freeCResources(cRess_t ress) -{ - free(ress.srcBuffer); - free(ress.dstBuffer); - - (void) LZ4F_freeDecompressionContext(ress.ctx); -} - -int frameCheck(cRess_t ress, FILE* const srcFile, unsigned bsid, size_t blockSize) -{ - LZ4F_errorCode_t nextToLoad = 0; - size_t curblocksize = 0; - int partialBlock = 0; - - /* Main Loop */ - for (;;) { - size_t readSize; - size_t pos = 0; - size_t decodedBytes = ress.dstBufferSize; - size_t remaining; - LZ4F_frameInfo_t frameInfo; - - /* Read input */ - readSize = fread(ress.srcBuffer, 1, ress.srcBufferSize, srcFile); - if (!readSize) break; /* reached end of file or stream */ - - while (pos < readSize) { /* still to read */ - /* Decode Input (at least partially) */ - if (!nextToLoad) { - /* LZ4F_decompress returned 0 : starting new frame */ - curblocksize = 0; - remaining = readSize - pos; - nextToLoad = LZ4F_getFrameInfo(ress.ctx, &frameInfo, (char*)(ress.srcBuffer)+pos, &remaining); - if (LZ4F_isError(nextToLoad)) - EXM_THROW(22, "Error getting frame info: %s", - LZ4F_getErrorName(nextToLoad)); - if (frameInfo.blockSizeID != (LZ4F_blockSizeID_t) bsid) - EXM_THROW(23, "Block size ID %u != expected %u", - frameInfo.blockSizeID, bsid); - pos += remaining; - /* nextToLoad should be block header size */ - remaining = nextToLoad; - decodedBytes = ress.dstBufferSize; - nextToLoad = LZ4F_decompress(ress.ctx, ress.dstBuffer, &decodedBytes, (char*)(ress.srcBuffer)+pos, &remaining, NULL); - if (LZ4F_isError(nextToLoad)) EXM_THROW(24, "Decompression error : %s", LZ4F_getErrorName(nextToLoad)); - pos += remaining; - } - decodedBytes = ress.dstBufferSize; - /* nextToLoad should be just enough to cover the next block */ - if (nextToLoad > (readSize - pos)) { - /* block is not fully contained in current buffer */ - partialBlock = 1; - remaining = readSize - pos; - } else { - if (partialBlock) { - partialBlock = 0; - } - remaining = nextToLoad; - } - nextToLoad = LZ4F_decompress(ress.ctx, ress.dstBuffer, &decodedBytes, (char*)(ress.srcBuffer)+pos, &remaining, NULL); - if (LZ4F_isError(nextToLoad)) EXM_THROW(24, "Decompression error : %s", LZ4F_getErrorName(nextToLoad)); - curblocksize += decodedBytes; - pos += remaining; - if (!partialBlock) { - /* detect small block due to end of frame; the final 4-byte frame checksum could be left in the buffer */ - if ((curblocksize != 0) && (nextToLoad > 4)) { - if (curblocksize != blockSize) - EXM_THROW(25, "Block size %u != expected %u, pos %u\n", - (unsigned)curblocksize, (unsigned)blockSize, (unsigned)pos); - } - curblocksize = 0; - } - } - } - /* can be out because readSize == 0, which could be an fread() error */ - if (ferror(srcFile)) EXM_THROW(26, "Read error"); - - if (nextToLoad!=0) EXM_THROW(27, "Unfinished stream"); - - return 0; -} - -int FUZ_usage(const char* 
programName) -{ - DISPLAY( "Usage :\n"); - DISPLAY( " %s [args] filename\n", programName); - DISPLAY( "\n"); - DISPLAY( "Arguments :\n"); - DISPLAY( " -b# : expected blocksizeID [4-7] (required)\n"); - DISPLAY( " -B# : expected blocksize [32-4194304] (required)\n"); - DISPLAY( " -v : verbose\n"); - DISPLAY( " -h : display help and exit\n"); - return 0; -} - - -int main(int argc, const char** argv) -{ - int argNb; - unsigned bsid=0; - size_t blockSize=0; - const char* const programName = argv[0]; - - /* Check command line */ - for (argNb=1; argNb='0') && (*argument<='9')) { - bsid *= 10; - bsid += (unsigned)(*argument - '0'); - argument++; - } - break; - - case 'B': - argument++; - blockSize=0; - while ((*argument>='0') && (*argument<='9')) { - blockSize *= 10; - blockSize += (size_t)(*argument - '0'); - argument++; - } - break; - - default: - ; - return FUZ_usage(programName); - } - } - } else { - int err; - FILE *srcFile; - cRess_t ress; - if (bsid == 0 || blockSize == 0) - return FUZ_usage(programName); - DISPLAY("Starting frame checker (%i-bits, %s)\n", (int)(sizeof(size_t)*8), LZ4_VERSION_STRING); - err = createCResources(&ress); - if (err) return (err); - srcFile = fopen(argument, "rb"); - if ( srcFile==NULL ) { - freeCResources(ress); - EXM_THROW(1, "%s: %s \n", argument, strerror(errno)); - } - assert (srcFile != NULL); - err = frameCheck(ress, srcFile, bsid, blockSize); - freeCResources(ress); - fclose(srcFile); - return (err); - } - } - return 0; -} diff --git a/librocksdb-sys/lz4/tests/checkTag.c b/librocksdb-sys/lz4/tests/checkTag.c deleted file mode 100644 index 5e5a034..0000000 --- a/librocksdb-sys/lz4/tests/checkTag.c +++ /dev/null @@ -1,79 +0,0 @@ -/* - checkTag.c - Version validation tool for LZ4 - Copyright (C) Yann Collet 2018-2020 - - GPL v2 License - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - You can contact the author at : - - LZ4 homepage : http://www.lz4.org - - LZ4 source repo : https://github.com/lz4/lz4 -*/ - -/* checkTag command : - * $ ./checkTag tag - * checkTag validates tags of following format : v[0-9].[0-9].[0-9]{any} - * The tag is then compared to LZ4 version number. - * They are compatible if first 3 digits are identical. - * Anything beyond that is free, and doesn't impact validation. - * Example : tag v1.8.1.2 is compatible with version 1.8.1 - * When tag and version are not compatible, program exits with error code 1. - * When they are compatible, it exists with a code 0. - * checkTag is intended to be used in automated testing environment. - */ - -#include /* printf */ -#include /* strlen, strncmp */ -#include "lz4.h" /* LZ4_VERSION_STRING */ - - -/* validate() : - * @return 1 if tag is compatible, 0 if not. 
- */ -static int validate(const char* const tag) -{ - size_t const tagLength = strlen(tag); - size_t const verLength = strlen(LZ4_VERSION_STRING); - - if (tagLength < 2) return 0; - if (tag[0] != 'v') return 0; - if (tagLength <= verLength) return 0; - - if (strncmp(LZ4_VERSION_STRING, tag+1, verLength)) return 0; - - return 1; -} - -int main(int argc, const char** argv) -{ - const char* const exeName = argv[0]; - const char* const tag = argv[1]; - if (argc!=2) { - printf("incorrect usage : %s tag \n", exeName); - return 2; - } - - printf("Version : %s \n", LZ4_VERSION_STRING); - printf("Tag : %s \n", tag); - - if (validate(tag)) { - printf("OK : tag is compatible with lz4 version \n"); - return 0; - } - - printf("!! error : tag and versions are not compatible !! \n"); - return 1; -} diff --git a/librocksdb-sys/lz4/tests/check_liblz4_version.sh b/librocksdb-sys/lz4/tests/check_liblz4_version.sh deleted file mode 100755 index 9304204..0000000 --- a/librocksdb-sys/lz4/tests/check_liblz4_version.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env sh -set -e - -# written as a script shell, because pipe management in python is horrible -ldd $1 | grep liblz4 - diff --git a/librocksdb-sys/lz4/tests/datagencli.c b/librocksdb-sys/lz4/tests/datagencli.c deleted file mode 100644 index ccb27df..0000000 --- a/librocksdb-sys/lz4/tests/datagencli.c +++ /dev/null @@ -1,155 +0,0 @@ -/* - datagencli.c - compressible data command line generator - Copyright (C) Yann Collet 2012-2020 - - GPL v2 License - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - Public forum : https://groups.google.com/forum/#!forum/lz4c -*/ - -/************************************** -* Includes -**************************************/ -#include "util.h" /* U32 */ -#include /* fprintf, stderr */ -#include "datagen.h" /* RDG_generate */ -#include "lz4.h" /* LZ4_VERSION_STRING */ - - -/************************************** -* Compiler specific -**************************************/ -#ifdef _MSC_VER /* Visual Studio */ -#define strtoull _strtoui64 /* https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/strtoui64-wcstoui64-strtoui64-l-wcstoui64-l */ -#endif - - -/************************************** -* Constants -**************************************/ -#define KB *(1 <<10) -#define MB *(1 <<20) -#define GB *(1U<<30) - -#define SIZE_DEFAULT (64 KB) -#define SEED_DEFAULT 0 -#define COMPRESSIBILITY_DEFAULT 50 - - -/************************************** -* Macros -**************************************/ -#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) -#define DISPLAYLEVEL(l, ...) 
if (displayLevel>=l) { DISPLAY(__VA_ARGS__); } -static unsigned displayLevel = 2; - - -/********************************************************* -* Command line -*********************************************************/ -static int usage(char* programName) -{ - DISPLAY( "Compressible data generator\n"); - DISPLAY( "Usage :\n"); - DISPLAY( " %s [size] [args]\n", programName); - DISPLAY( "\n"); - DISPLAY( "Arguments :\n"); - DISPLAY( " -g# : generate # data (default:%i)\n", SIZE_DEFAULT); - DISPLAY( " -s# : Select seed (default:%i)\n", SEED_DEFAULT); - DISPLAY( " -P# : Select compressibility in %% (default:%i%%)\n", COMPRESSIBILITY_DEFAULT); - DISPLAY( " -h : display help and exit\n"); - DISPLAY( "Special values :\n"); - DISPLAY( " -P0 : generate incompressible noise\n"); - DISPLAY( " -P100 : generate sparse files\n"); - return 0; -} - - -int main(int argc, char** argv) -{ - int argNb; - double proba = (double)COMPRESSIBILITY_DEFAULT / 100; - double litProba = 0.0; - U64 size = SIZE_DEFAULT; - U32 seed = SEED_DEFAULT; - char* programName; - - /* Check command line */ - programName = argv[0]; - for(argNb=1; argNb